src/hotspot/share/gc/g1/g1CollectedHeap.hpp

 286   // this method will be found dead by the marking cycle).
 287   void allocate_dummy_regions() PRODUCT_RETURN;
 288 
 289   // Clear RSets after a compaction. It also resets the GC time stamps.
 290   void clear_rsets_post_compaction();
 291 
 292   // If the HR printer is active, dump the state of the regions in the
 293   // heap after a compaction.
 294   void print_hrm_post_compaction();
 295 
 296   // Create a memory mapper for auxiliary data structures of the given size and
 297   // translation factor.
 298   static G1RegionToSpaceMapper* create_aux_memory_mapper(const char* description,
 299                                                          size_t size,
 300                                                          size_t translation_factor);
 301 
 302   static G1Policy* create_g1_policy(STWGCTimer* gc_timer);
 303 
 304   void trace_heap(GCWhen::Type when, const GCTracer* tracer);
 305 
 306   void process_weak_jni_handles();
 307 
 308   // These are macros so that, if the assert fires, we get the correct
 309   // line number, file, etc.
 310 
 311 #define heap_locking_asserts_params(_extra_message_)                          \
 312   "%s : Heap_lock locked: %s, at safepoint: %s, is VM thread: %s",            \
 313   (_extra_message_),                                                          \
 314   BOOL_TO_STR(Heap_lock->owned_by_self()),                                    \
 315   BOOL_TO_STR(SafepointSynchronize::is_at_safepoint()),                       \
 316   BOOL_TO_STR(Thread::current()->is_VM_thread())
 317 
 318 #define assert_heap_locked()                                                  \
 319   do {                                                                        \
 320     assert(Heap_lock->owned_by_self(),                                        \
 321            heap_locking_asserts_params("should be holding the Heap_lock"));   \
 322   } while (0)
 323 
 324 #define assert_heap_locked_or_at_safepoint(_should_be_vm_thread_)             \
 325   do {                                                                        \
 326     assert(Heap_lock->owned_by_self() ||                                      \
 327            (SafepointSynchronize::is_at_safepoint() &&                        \




 286   // this method will be found dead by the marking cycle).
 287   void allocate_dummy_regions() PRODUCT_RETURN;
 288 
 289   // Clear RSets after a compaction. It also resets the GC time stamps.
 290   void clear_rsets_post_compaction();
 291 
 292   // If the HR printer is active, dump the state of the regions in the
 293   // heap after a compaction.
 294   void print_hrm_post_compaction();
 295 
 296   // Create a memory mapper for auxiliary data structures of the given size and
 297   // translation factor.
 298   static G1RegionToSpaceMapper* create_aux_memory_mapper(const char* description,
 299                                                          size_t size,
 300                                                          size_t translation_factor);
 301 
 302   static G1Policy* create_g1_policy(STWGCTimer* gc_timer);
 303 
 304   void trace_heap(GCWhen::Type when, const GCTracer* tracer);
 305 


 306   // These are macros so that, if the assert fires, we get the correct
 307   // line number, file, etc.
 308 
 309 #define heap_locking_asserts_params(_extra_message_)                          \
 310   "%s : Heap_lock locked: %s, at safepoint: %s, is VM thread: %s",            \
 311   (_extra_message_),                                                          \
 312   BOOL_TO_STR(Heap_lock->owned_by_self()),                                    \
 313   BOOL_TO_STR(SafepointSynchronize::is_at_safepoint()),                       \
 314   BOOL_TO_STR(Thread::current()->is_VM_thread())
 315 
 316 #define assert_heap_locked()                                                  \
 317   do {                                                                        \
 318     assert(Heap_lock->owned_by_self(),                                        \
 319            heap_locking_asserts_params("should be holding the Heap_lock"));   \
 320   } while (0)
 321 
 322 #define assert_heap_locked_or_at_safepoint(_should_be_vm_thread_)             \
 323   do {                                                                        \
 324     assert(Heap_lock->owned_by_self() ||                                      \
 325            (SafepointSynchronize::is_at_safepoint() &&                        \


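For readers outside HotSpot, the sketch below (standalone, not part of this change, and not HotSpot code) illustrates the point of the comment above the macros: HotSpot's assert(cond, fmt, ...) records __FILE__/__LINE__ at its expansion point, so keeping the heap-locking checks as macros makes a failure report name the caller's source line instead of a shared helper's. The names my_assert, heap_lock_owned_by_self and allocate_regions are hypothetical stand-ins introduced only for this illustration.

#include <cstdio>
#include <cstdlib>

#define BOOL_TO_STR(b) ((b) ? "true" : "false")

// Hypothetical stand-in for HotSpot's assert(cond, fmt, ...): prints the
// message plus __FILE__/__LINE__ of the expansion point, then aborts.
#define my_assert(cond, ...)                                                  \
  do {                                                                        \
    if (!(cond)) {                                                            \
      std::fprintf(stderr, __VA_ARGS__);                                      \
      std::fprintf(stderr, "  (%s:%d)\n", __FILE__, __LINE__);                \
      std::abort();                                                           \
    }                                                                         \
  } while (0)

// Faked lock state; the real check queries Heap_lock->owned_by_self().
static bool heap_lock_owned_by_self = false;

#define heap_locking_asserts_params(_extra_message_)                          \
  "%s : Heap_lock locked: %s",                                                \
  (_extra_message_), BOOL_TO_STR(heap_lock_owned_by_self)

// Because this is a macro, my_assert (and its __FILE__/__LINE__) expands at
// the call site, so a failure is attributed to the caller's source line.
#define assert_heap_locked()                                                  \
  do {                                                                        \
    my_assert(heap_lock_owned_by_self,                                        \
              heap_locking_asserts_params("should be holding the Heap_lock")); \
  } while (0)

void allocate_regions() {
  assert_heap_locked();            // if this fires, the report names this line
}

int main() {
  heap_lock_owned_by_self = true;  // pretend this thread took the Heap_lock
  allocate_regions();              // check passes
  heap_lock_owned_by_self = false;
  allocate_regions();              // fails; the report names the
                                   // assert_heap_locked() line in allocate_regions
  return 0;
}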