// rev 57601 : [mq]: metaspace-improvement
1 /*
2 * Copyright (c) 2013, 2019, Red Hat, Inc. All rights reserved.
3 *
4 * This code is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License version 2 only, as
6 * published by the Free Software Foundation.
7 *
8 * This code is distributed in the hope that it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
11 * version 2 for more details (a copy is included in the LICENSE file that
12 * accompanied this code).
13 *
14 * You should have received a copy of the GNU General Public License version
15 * 2 along with this work; if not, write to the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
17 *
18 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
19 * or visit www.oracle.com if you need additional information or have any
20 * questions.
21 *
22 */
23
24 #ifndef SHARE_GC_SHENANDOAH_SHENANDOAHHEAP_HPP
25 #define SHARE_GC_SHENANDOAH_SHENANDOAHHEAP_HPP
26
27 #include "gc/shared/markBitMap.hpp"
28 #include "gc/shared/softRefPolicy.hpp"
29 #include "gc/shared/collectedHeap.hpp"
30 #include "gc/shenandoah/shenandoahAsserts.hpp"
31 #include "gc/shenandoah/shenandoahAllocRequest.hpp"
32 #include "gc/shenandoah/shenandoahLock.hpp"
33 #include "gc/shenandoah/shenandoahEvacOOMHandler.hpp"
34 #include "gc/shenandoah/shenandoahSharedVariables.hpp"
35 #include "services/memoryManager.hpp"
36
37 class ConcurrentGCTimer;
38 class ReferenceProcessor;
39 class ShenandoahAllocTracker;
40 class ShenandoahCollectorPolicy;
41 class ShenandoahControlThread;
42 class ShenandoahGCSession;
43 class ShenandoahGCStateResetter;
44 class ShenandoahHeuristics;
45 class ShenandoahMarkingContext;
46 class ShenandoahMode;
47 class ShenandoahPhaseTimings;
48 class ShenandoahHeap;
49 class ShenandoahHeapRegion;
50 class ShenandoahHeapRegionClosure;
51 class ShenandoahCollectionSet;
52 class ShenandoahFreeSet;
53 class ShenandoahConcurrentMark;
54 class ShenandoahMarkCompact;
55 class ShenandoahMonitoringSupport;
56 class ShenandoahPacer;
57 class ShenandoahTraversalGC;
58 class ShenandoahVerifier;
59 class ShenandoahWorkGang;
60 class VMStructs;
61
62 class ShenandoahRegionIterator : public StackObj {
63 private:
64 ShenandoahHeap* _heap;
65
66 DEFINE_PAD_MINUS_SIZE(0, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile size_t));
67 volatile size_t _index;
68 DEFINE_PAD_MINUS_SIZE(1, DEFAULT_CACHE_LINE_SIZE, 0);
69
70 // No implicit copying: iterators should be passed by reference to capture the state
71 ShenandoahRegionIterator(const ShenandoahRegionIterator& that);
72 ShenandoahRegionIterator& operator=(const ShenandoahRegionIterator& o);
73
74 public:
75 ShenandoahRegionIterator();
76 ShenandoahRegionIterator(ShenandoahHeap* heap);
77
78 // Reset iterator to default state
79 void reset();
80
81 // Returns next region, or NULL if there are no more regions.
82 // This is multi-thread-safe.
83 inline ShenandoahHeapRegion* next();
84
85 // This is *not* MT safe. However, in the absence of multithreaded access, it
86 // can be used to determine if there is more work to do.
87 bool has_next() const;
88 };
89
90 class ShenandoahHeapRegionClosure : public StackObj {
91 public:
92 virtual void heap_region_do(ShenandoahHeapRegion* r) = 0;
93 virtual bool is_thread_safe() { return false; }
94 };
95
96 #ifdef ASSERT
97 class ShenandoahAssertToSpaceClosure : public OopClosure {
98 private:
99 template <class T>
100 void do_oop_work(T* p);
101 public:
102 void do_oop(narrowOop* p);
103 void do_oop(oop* p);
104 };
105 #endif
106
107 typedef ShenandoahLock ShenandoahHeapLock;
108 typedef ShenandoahLocker ShenandoahHeapLocker;
109
// Shenandoah GC is a low-pause concurrent GC that uses Brooks forwarding pointers
// to encode forwarding data. See BrooksPointer for details on forwarding data encoding.
// See ShenandoahControlThread for GC cycle structure.
//
114 class ShenandoahHeap : public CollectedHeap {
115 friend class ShenandoahAsserts;
116 friend class VMStructs;
117 friend class ShenandoahGCSession;
118 friend class ShenandoahGCStateResetter;
119
120 // ---------- Locks that guard important data structures in Heap
121 //
122 private:
123 ShenandoahHeapLock _lock;
124
125 public:
126 ShenandoahHeapLock* lock() {
127 return &_lock;
128 }
129
130 void assert_heaplock_owned_by_current_thread() NOT_DEBUG_RETURN;
131 void assert_heaplock_not_owned_by_current_thread() NOT_DEBUG_RETURN;
132 void assert_heaplock_or_safepoint() NOT_DEBUG_RETURN;
133
134 // ---------- Initialization, termination, identification, printing routines
135 //
136 public:
137 static ShenandoahHeap* heap();
138 static ShenandoahHeap* heap_no_check();
139
140 const char* name() const { return "Shenandoah"; }
141 ShenandoahHeap::Name kind() const { return CollectedHeap::Shenandoah; }
142
143 ShenandoahHeap(ShenandoahCollectorPolicy* policy);
144 jint initialize();
145 void post_initialize();
146 void initialize_heuristics();
147
148 void initialize_serviceability();
149
150 void print_on(outputStream* st) const;
151 void print_extended_on(outputStream *st) const;
152 void print_tracing_info() const;
153 void print_gc_threads_on(outputStream* st) const;
154 void print_heap_regions_on(outputStream* st) const;
155
156 void stop();
157
158 void prepare_for_verify();
159 void verify(VerifyOption vo);
160
161 // ---------- Heap counters and metrics
162 //
163 private:
164 size_t _initial_size;
165 size_t _minimum_size;
166 DEFINE_PAD_MINUS_SIZE(0, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile size_t));
167 volatile size_t _used;
168 volatile size_t _committed;
169 volatile size_t _bytes_allocated_since_gc_start;
170 DEFINE_PAD_MINUS_SIZE(1, DEFAULT_CACHE_LINE_SIZE, 0);
171
172 public:
173 void increase_used(size_t bytes);
174 void decrease_used(size_t bytes);
175 void set_used(size_t bytes);
176
177 void increase_committed(size_t bytes);
178 void decrease_committed(size_t bytes);
179 void increase_allocated(size_t bytes);
180
181 size_t bytes_allocated_since_gc_start();
182 void reset_bytes_allocated_since_gc_start();
183
184 size_t min_capacity() const;
185 size_t max_capacity() const;
186 size_t initial_capacity() const;
187 size_t capacity() const;
188 size_t used() const;
189 size_t committed() const;
190
191 // ---------- Workers handling
192 //
193 private:
194 uint _max_workers;
195 ShenandoahWorkGang* _workers;
196 ShenandoahWorkGang* _safepoint_workers;
197
198 public:
199 uint max_workers();
200 void assert_gc_workers(uint nworker) NOT_DEBUG_RETURN;
201
202 WorkGang* workers() const;
203 WorkGang* get_safepoint_workers();
204
205 void gc_threads_do(ThreadClosure* tcl) const;
206
207 // ---------- Heap regions handling machinery
208 //
209 private:
210 MemRegion _heap_region;
211 bool _heap_region_special;
212 size_t _num_regions;
213 ShenandoahHeapRegion** _regions;
214 ShenandoahRegionIterator _update_refs_iterator;
215
216 public:
217 inline size_t num_regions() const { return _num_regions; }
218 inline bool is_heap_region_special() { return _heap_region_special; }
219
220 inline ShenandoahHeapRegion* const heap_region_containing(const void* addr) const;
221 inline size_t heap_region_index_containing(const void* addr) const;
222
223 inline ShenandoahHeapRegion* const get_region(size_t region_idx) const;
224
225 void heap_region_iterate(ShenandoahHeapRegionClosure* blk) const;
226 void parallel_heap_region_iterate(ShenandoahHeapRegionClosure* blk) const;
227
228 // ---------- GC state machinery
229 //
230 // GC state describes the important parts of collector state, that may be
231 // used to make barrier selection decisions in the native and generated code.
232 // Multiple bits can be set at once.
233 //
234 // Important invariant: when GC state is zero, the heap is stable, and no barriers
235 // are required.
236 //
237 public:
238 enum GCStateBitPos {
239 // Heap has forwarded objects: needs LRB barriers.
240 HAS_FORWARDED_BITPOS = 0,
241
242 // Heap is under marking: needs SATB barriers.
243 MARKING_BITPOS = 1,
244
245 // Heap is under evacuation: needs LRB barriers. (Set together with HAS_FORWARDED)
246 EVACUATION_BITPOS = 2,
247
248 // Heap is under updating: needs no additional barriers.
249 UPDATEREFS_BITPOS = 3,
250
251 // Heap is under traversal collection
252 TRAVERSAL_BITPOS = 4
253 };
254
255 enum GCState {
256 STABLE = 0,
257 HAS_FORWARDED = 1 << HAS_FORWARDED_BITPOS,
258 MARKING = 1 << MARKING_BITPOS,
259 EVACUATION = 1 << EVACUATION_BITPOS,
260 UPDATEREFS = 1 << UPDATEREFS_BITPOS,
261 TRAVERSAL = 1 << TRAVERSAL_BITPOS
262 };
263
264 private:
265 ShenandoahSharedBitmap _gc_state;
266 ShenandoahSharedFlag _degenerated_gc_in_progress;
267 ShenandoahSharedFlag _full_gc_in_progress;
268 ShenandoahSharedFlag _full_gc_move_in_progress;
269 ShenandoahSharedFlag _progress_last_gc;
270
271 void set_gc_state_all_threads(char state);
272 void set_gc_state_mask(uint mask, bool value);
273
274 public:
275 char gc_state() const;
276 static address gc_state_addr();
277
278 void set_concurrent_mark_in_progress(bool in_progress);
279 void set_evacuation_in_progress(bool in_progress);
280 void set_update_refs_in_progress(bool in_progress);
281 void set_degenerated_gc_in_progress(bool in_progress);
282 void set_full_gc_in_progress(bool in_progress);
283 void set_full_gc_move_in_progress(bool in_progress);
284 void set_concurrent_traversal_in_progress(bool in_progress);
285 void set_has_forwarded_objects(bool cond);
286
287 inline bool is_stable() const;
288 inline bool is_idle() const;
289 inline bool is_concurrent_mark_in_progress() const;
290 inline bool is_update_refs_in_progress() const;
291 inline bool is_evacuation_in_progress() const;
292 inline bool is_degenerated_gc_in_progress() const;
293 inline bool is_full_gc_in_progress() const;
294 inline bool is_full_gc_move_in_progress() const;
295 inline bool is_concurrent_traversal_in_progress() const;
296 inline bool has_forwarded_objects() const;
297 inline bool is_gc_in_progress_mask(uint mask) const;
298
299 // ---------- GC cancellation and degeneration machinery
300 //
301 // Cancelled GC flag is used to notify concurrent phases that they should terminate.
302 //
303 public:
304 enum ShenandoahDegenPoint {
305 _degenerated_unset,
306 _degenerated_traversal,
307 _degenerated_outside_cycle,
308 _degenerated_mark,
309 _degenerated_evac,
310 _degenerated_updaterefs,
311 _DEGENERATED_LIMIT
312 };
313
314 static const char* degen_point_to_string(ShenandoahDegenPoint point) {
315 switch (point) {
316 case _degenerated_unset:
317 return "<UNSET>";
318 case _degenerated_traversal:
319 return "Traversal";
320 case _degenerated_outside_cycle:
321 return "Outside of Cycle";
322 case _degenerated_mark:
323 return "Mark";
324 case _degenerated_evac:
325 return "Evacuation";
326 case _degenerated_updaterefs:
327 return "Update Refs";
328 default:
329 ShouldNotReachHere();
330 return "ERROR";
331 }
332 };
333
334 private:
335 enum CancelState {
336 // Normal state. GC has not been cancelled and is open for cancellation.
337 // Worker threads can suspend for safepoint.
338 CANCELLABLE,
339
340 // GC has been cancelled. Worker threads can not suspend for
341 // safepoint but must finish their work as soon as possible.
342 CANCELLED,
343
344 // GC has not been cancelled and must not be cancelled. At least
345 // one worker thread checks for pending safepoint and may suspend
346 // if a safepoint is pending.
347 NOT_CANCELLED
348 };
349
350 ShenandoahSharedEnumFlag<CancelState> _cancelled_gc;
351 bool try_cancel_gc();
352
353 public:
354 static address cancelled_gc_addr();
355
356 inline bool cancelled_gc() const;
357 inline bool check_cancelled_gc_and_yield(bool sts_active = true);
358
359 inline void clear_cancelled_gc();
360
361 void cancel_gc(GCCause::Cause cause);
362
363 // ---------- GC operations entry points
364 //
365 public:
366 // Entry points to STW GC operations, these cause a related safepoint, that then
367 // call the entry method below
368 void vmop_entry_init_mark();
369 void vmop_entry_final_mark();
370 void vmop_entry_final_evac();
371 void vmop_entry_init_updaterefs();
372 void vmop_entry_final_updaterefs();
373 void vmop_entry_init_traversal();
374 void vmop_entry_final_traversal();
375 void vmop_entry_full(GCCause::Cause cause);
376 void vmop_degenerated(ShenandoahDegenPoint point);
377
378 // Entry methods to normally STW GC operations. These set up logging, monitoring
379 // and workers for net VM operation
380 void entry_init_mark();
381 void entry_final_mark();
382 void entry_final_evac();
383 void entry_init_updaterefs();
384 void entry_final_updaterefs();
385 void entry_init_traversal();
386 void entry_final_traversal();
387 void entry_full(GCCause::Cause cause);
388 void entry_degenerated(int point);
389
390 // Entry methods to normally concurrent GC operations. These set up logging, monitoring
391 // for concurrent operation.
392 void entry_reset();
393 void entry_mark();
394 void entry_preclean();
395 void entry_roots();
396 void entry_cleanup();
397 void entry_evac();
398 void entry_updaterefs();
399 void entry_traversal();
400 void entry_uncommit(double shrink_before);
401
402 private:
403 // Actual work for the phases
404 void op_init_mark();
405 void op_final_mark();
406 void op_final_evac();
407 void op_init_updaterefs();
408 void op_final_updaterefs();
409 void op_init_traversal();
410 void op_final_traversal();
411 void op_full(GCCause::Cause cause);
412 void op_degenerated(ShenandoahDegenPoint point);
413 void op_degenerated_fail();
414 void op_degenerated_futile();
415
416 void op_reset();
417 void op_mark();
418 void op_preclean();
419 void op_roots();
420 void op_cleanup();
421 void op_conc_evac();
422 void op_stw_evac();
423 void op_updaterefs();
424 void op_traversal();
425 void op_uncommit(double shrink_before);
426
427 // Messages for GC trace events, they have to be immortal for
428 // passing around the logging/tracing systems
429 const char* init_mark_event_message() const;
430 const char* final_mark_event_message() const;
431 const char* conc_mark_event_message() const;
432 const char* degen_event_message(ShenandoahDegenPoint point) const;
433
434 // ---------- GC subsystems
435 //
436 private:
437 ShenandoahControlThread* _control_thread;
438 ShenandoahCollectorPolicy* _shenandoah_policy;
439 ShenandoahMode* _gc_mode;
440 ShenandoahHeuristics* _heuristics;
441 ShenandoahFreeSet* _free_set;
442 ShenandoahConcurrentMark* _scm;
443 ShenandoahTraversalGC* _traversal_gc;
444 ShenandoahMarkCompact* _full_gc;
445 ShenandoahPacer* _pacer;
446 ShenandoahVerifier* _verifier;
447
448 ShenandoahAllocTracker* _alloc_tracker;
449 ShenandoahPhaseTimings* _phase_timings;
450
451 ShenandoahControlThread* control_thread() { return _control_thread; }
452 ShenandoahMarkCompact* full_gc() { return _full_gc; }
453
454 public:
455 ShenandoahCollectorPolicy* shenandoah_policy() const { return _shenandoah_policy; }
456 ShenandoahHeuristics* heuristics() const { return _heuristics; }
457 ShenandoahFreeSet* free_set() const { return _free_set; }
458 ShenandoahConcurrentMark* concurrent_mark() { return _scm; }
459 ShenandoahTraversalGC* traversal_gc() const { return _traversal_gc; }
460 bool is_traversal_mode() const { return _traversal_gc != NULL; }
461 ShenandoahPacer* pacer() const { return _pacer; }
462
463 ShenandoahPhaseTimings* phase_timings() const { return _phase_timings; }
464 ShenandoahAllocTracker* alloc_tracker() const { return _alloc_tracker; }
465
466 ShenandoahVerifier* verifier();
467
468 // ---------- VM subsystem bindings
469 //
470 private:
471 ShenandoahMonitoringSupport* _monitoring_support;
472 MemoryPool* _memory_pool;
473 GCMemoryManager _stw_memory_manager;
474 GCMemoryManager _cycle_memory_manager;
475 ConcurrentGCTimer* _gc_timer;
476 SoftRefPolicy _soft_ref_policy;
477
478 // For exporting to SA
479 int _log_min_obj_alignment_in_bytes;
480 public:
481 ShenandoahMonitoringSupport* monitoring_support() { return _monitoring_support; }
482 GCMemoryManager* cycle_memory_manager() { return &_cycle_memory_manager; }
483 GCMemoryManager* stw_memory_manager() { return &_stw_memory_manager; }
484 SoftRefPolicy* soft_ref_policy() { return &_soft_ref_policy; }
485
486 GrowableArray<GCMemoryManager*> memory_managers();
487 GrowableArray<MemoryPool*> memory_pools();
488 MemoryUsage memory_usage();
489 GCTracer* tracer();
490 GCTimer* gc_timer() const;
491
492 // ---------- Reference processing
493 //
494 private:
495 AlwaysTrueClosure _subject_to_discovery;
496 ReferenceProcessor* _ref_processor;
497 ShenandoahSharedFlag _process_references;
498
499 void ref_processing_init();
500
501 public:
502 ReferenceProcessor* ref_processor() { return _ref_processor; }
503 void set_process_references(bool pr);
504 bool process_references() const;
505
506 // ---------- Class Unloading
507 //
508 private:
509 ShenandoahSharedFlag _unload_classes;
510
511 public:
512 void set_unload_classes(bool uc);
513 bool unload_classes() const;
514
515 // Perform STW class unloading and weak root cleaning
516 void parallel_cleaning(bool full_gc);
517
518 private:
519 void stw_unload_classes(bool full_gc);
520 void stw_process_weak_roots(bool full_gc);
521
522 // ---------- Generic interface hooks
523 // Minor things that super-interface expects us to implement to play nice with
524 // the rest of runtime. Some of the things here are not required to be implemented,
525 // and can be stubbed out.
526 //
527 public:
528 AdaptiveSizePolicy* size_policy() shenandoah_not_implemented_return(NULL);
529 bool is_maximal_no_gc() const shenandoah_not_implemented_return(false);
530
531 bool is_in(const void* p) const;
532
533 void collect(GCCause::Cause cause);
534 void do_full_collection(bool clear_all_soft_refs);
535
536 // Used for parsing heap during error printing
537 HeapWord* block_start(const void* addr) const;
538 bool block_is_obj(const HeapWord* addr) const;
539
540 // Used for native heap walkers: heap dumpers, mostly
541 void object_iterate(ObjectClosure* cl);
542 void safe_object_iterate(ObjectClosure* cl);
543
544 // Used by RMI
545 jlong millis_since_last_gc();
546
547 // ---------- Safepoint interface hooks
548 //
549 public:
550 void safepoint_synchronize_begin();
551 void safepoint_synchronize_end();
552
553 // ---------- Code roots handling hooks
554 //
555 public:
556 void register_nmethod(nmethod* nm);
557 void unregister_nmethod(nmethod* nm);
558 void flush_nmethod(nmethod* nm) {}
559 void verify_nmethod(nmethod* nm) {}
560
561 // ---------- Pinning hooks
562 //
563 public:
564 // Shenandoah supports per-object (per-region) pinning
565 bool supports_object_pinning() const { return true; }
566
567 oop pin_object(JavaThread* thread, oop obj);
568 void unpin_object(JavaThread* thread, oop obj);
569
570 // ---------- Allocation support
571 //
572 private:
573 HeapWord* allocate_memory_under_lock(ShenandoahAllocRequest& request, bool& in_new_region);
574 inline HeapWord* allocate_from_gclab(Thread* thread, size_t size);
575 HeapWord* allocate_from_gclab_slow(Thread* thread, size_t size);
576 HeapWord* allocate_new_gclab(size_t min_size, size_t word_size, size_t* actual_size);
577 void retire_and_reset_gclabs();
578
579 public:
580 HeapWord* allocate_memory(ShenandoahAllocRequest& request);
581 HeapWord* mem_allocate(size_t size, bool* what);
582 MetaWord* satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
583 size_t size,
584 Metaspace::MetadataType mdtype);
585
586 void notify_mutator_alloc_words(size_t words, bool waste);
587
588 // Shenandoah supports TLAB allocation
589 bool supports_tlab_allocation() const { return true; }
590
591 HeapWord* allocate_new_tlab(size_t min_size, size_t requested_size, size_t* actual_size);
592 size_t tlab_capacity(Thread *thr) const;
593 size_t unsafe_max_tlab_alloc(Thread *thread) const;
594 size_t max_tlab_size() const;
595 size_t tlab_used(Thread* ignored) const;
596
597 void resize_tlabs();
598
599 void ensure_parsability(bool retire_tlabs);
600 void make_parsable(bool retire_tlabs);
601
602 // ---------- Marking support
603 //
604 private:
605 ShenandoahMarkingContext* _marking_context;
606 MemRegion _bitmap_region;
607 MemRegion _aux_bitmap_region;
608 MarkBitMap _verification_bit_map;
609 MarkBitMap _aux_bit_map;
610
611 size_t _bitmap_size;
612 size_t _bitmap_regions_per_slice;
613 size_t _bitmap_bytes_per_slice;
614
615 bool _bitmap_region_special;
616 bool _aux_bitmap_region_special;
617
618 // Used for buffering per-region liveness data.
619 // Needed since ShenandoahHeapRegion uses atomics to update liveness.
620 //
621 // The array has max-workers elements, each of which is an array of
622 // jushort * max_regions. The choice of jushort is not accidental:
623 // there is a tradeoff between static/dynamic footprint that translates
624 // into cache pressure (which is already high during marking), and
625 // too many atomic updates. size_t/jint is too large, jbyte is too small.
626 jushort** _liveness_cache;
627
628 public:
629 inline ShenandoahMarkingContext* complete_marking_context() const;
630 inline ShenandoahMarkingContext* marking_context() const;
631 inline void mark_complete_marking_context();
632 inline void mark_incomplete_marking_context();
633
634 template<class T>
635 inline void marked_object_iterate(ShenandoahHeapRegion* region, T* cl);
636
637 template<class T>
638 inline void marked_object_iterate(ShenandoahHeapRegion* region, T* cl, HeapWord* limit);
639
640 template<class T>
641 inline void marked_object_oop_iterate(ShenandoahHeapRegion* region, T* cl, HeapWord* limit);
642
643 void reset_mark_bitmap();
644
645 // SATB barriers hooks
646 template<bool RESOLVE>
647 inline bool requires_marking(const void* entry) const;
648 void force_satb_flush_all_threads();
649
650 // Support for bitmap uncommits
651 bool commit_bitmap_slice(ShenandoahHeapRegion *r);
652 bool uncommit_bitmap_slice(ShenandoahHeapRegion *r);
653 bool is_bitmap_slice_committed(ShenandoahHeapRegion* r, bool skip_self = false);
654
655 // Liveness caching support
656 jushort* get_liveness_cache(uint worker_id);
657 void flush_liveness_cache(uint worker_id);
658
659 // ---------- Evacuation support
660 //
661 private:
662 ShenandoahCollectionSet* _collection_set;
663 ShenandoahEvacOOMHandler _oom_evac_handler;
664
665 void evacuate_and_update_roots();
666
667 public:
668 static address in_cset_fast_test_addr();
669
670 ShenandoahCollectionSet* collection_set() const { return _collection_set; }
671
672 template <class T>
673 inline bool in_collection_set(T obj) const;
674
675 // Avoid accidentally calling the method above with ShenandoahHeapRegion*, which would be *wrong*.
676 inline bool in_collection_set(ShenandoahHeapRegion* r) shenandoah_not_implemented_return(false);
677
678 // Evacuates object src. Returns the evacuated object, either evacuated
679 // by this thread, or by some other thread.
680 inline oop evacuate_object(oop src, Thread* thread);
681
682 // Call before/after evacuation.
683 void enter_evacuation();
684 void leave_evacuation();
685
686 // ---------- Helper functions
687 //
688 public:
689 template <class T>
690 inline oop evac_update_with_forwarded(T* p);
691
692 template <class T>
693 inline oop maybe_update_with_forwarded(T* p);
694
695 template <class T>
696 inline oop maybe_update_with_forwarded_not_null(T* p, oop obj);
697
698 template <class T>
699 inline oop update_with_forwarded_not_null(T* p, oop obj);
700
701 static inline oop cas_oop(oop n, narrowOop* addr, oop c);
702 static inline oop cas_oop(oop n, oop* addr, oop c);
703
704 void trash_humongous_region_at(ShenandoahHeapRegion *r);
705
706 void deduplicate_string(oop str);
707
708 void stop_concurrent_marking();
709
710 private:
711 void trash_cset_regions();
712 void update_heap_references(bool concurrent);
713
714 // ---------- Testing helpers functions
715 //
716 private:
717 ShenandoahSharedFlag _inject_alloc_failure;
718
719 void try_inject_alloc_failure();
720 bool should_inject_alloc_failure();
721 };
722
723 #endif // SHARE_GC_SHENANDOAH_SHENANDOAHHEAP_HPP
--- EOF ---