< prev index next >

src/share/vm/gc/g1/g1CollectedHeap.hpp

Print this page




 274   // called at the end of a GC and artificially expands the heap by
 275   // allocating a number of dead regions. This way we can induce very
 276   // frequent marking cycles and stress the cleanup / concurrent
 277   // cleanup code more (as all the regions that will be allocated by
 278   // this method will be found dead by the marking cycle).
 279   void allocate_dummy_regions() PRODUCT_RETURN;
 280 
 281   // Clear RSets after a compaction. It also resets the GC time stamps.
 282   void clear_rsets_post_compaction();
 283 
 284   // If the HR printer is active, dump the state of the regions in the
 285   // heap after a compaction.
 286   void print_hrm_post_compaction();
 287 
 288   // Create a memory mapper for auxiliary data structures of the given size and
 289   // translation factor.
 290   static G1RegionToSpaceMapper* create_aux_memory_mapper(const char* description,
 291                                                          size_t size,
 292                                                          size_t translation_factor);
 293 
       // NOTE(review): presumably emits the trailing line(s) of a GC pause's
       // log output; the jlong argument looks like a start-time counter used
       // to compute the pause duration -- confirm against the .cpp.
 294   void log_gc_footer(jlong pause_time_counter);
 295 
       // NOTE(review): presumably reports heap state to the given GCTracer at
       // the point in the collection identified by 'when' (GCWhen::Type) --
       // confirm against the implementation.
 296   void trace_heap(GCWhen::Type when, const GCTracer* tracer);
 297 
       // NOTE(review): presumably processes/clears weak JNI handles as part
       // of reference/weak-root processing -- confirm against the .cpp.
 298   void process_weak_jni_handles();
 299 
 300   // These are macros so that, if the assert fires, we get the correct
 301   // line number, file, etc.
 302 
       // Expands to an assert message: a format string plus arguments
       // describing the current locking context -- whether this thread owns
       // the Heap_lock, whether we are at a safepoint, and whether this is
       // the VM thread.
 303 #define heap_locking_asserts_params(_extra_message_)                          \
 304   "%s : Heap_lock locked: %s, at safepoint: %s, is VM thread: %s",            \
 305   (_extra_message_),                                                          \
 306   BOOL_TO_STR(Heap_lock->owned_by_self()),                                    \
 307   BOOL_TO_STR(SafepointSynchronize::is_at_safepoint()),                       \
 308   BOOL_TO_STR(Thread::current()->is_VM_thread())
 309 
       // Asserts that the calling thread currently owns the Heap_lock.
 310 #define assert_heap_locked()                                                  \
 311   do {                                                                        \
 312     assert(Heap_lock->owned_by_self(),                                        \
 313            heap_locking_asserts_params("should be holding the Heap_lock"));   \
 314   } while (0)




 274   // called at the end of a GC and artificially expands the heap by
 275   // allocating a number of dead regions. This way we can induce very
 276   // frequent marking cycles and stress the cleanup / concurrent
 277   // cleanup code more (as all the regions that will be allocated by
 278   // this method will be found dead by the marking cycle).
 279   void allocate_dummy_regions() PRODUCT_RETURN;
 280 
 281   // Clear RSets after a compaction. It also resets the GC time stamps.
 282   void clear_rsets_post_compaction();
 283 
 284   // If the HR printer is active, dump the state of the regions in the
 285   // heap after a compaction.
 286   void print_hrm_post_compaction();
 287 
 288   // Create a memory mapper for auxiliary data structures of the given size and
 289   // translation factor.
 290   static G1RegionToSpaceMapper* create_aux_memory_mapper(const char* description,
 291                                                          size_t size,
 292                                                          size_t translation_factor);
 293 
       // NOTE(review): presumably emits the trailing line(s) of a GC pause's
       // log output; this version takes no timing argument (the old signature
       // took a jlong pause-time counter) -- confirm against the .cpp how the
       // duration is now obtained.
 294   void log_gc_footer();
 295 
       // NOTE(review): presumably reports heap state to the given GCTracer at
       // the point in the collection identified by 'when' (GCWhen::Type) --
       // confirm against the implementation.
 296   void trace_heap(GCWhen::Type when, const GCTracer* tracer);
 297 
       // NOTE(review): presumably processes/clears weak JNI handles as part
       // of reference/weak-root processing -- confirm against the .cpp.
 298   void process_weak_jni_handles();
 299 
 300   // These are macros so that, if the assert fires, we get the correct
 301   // line number, file, etc.
 302 
       // Expands to an assert message: a format string plus arguments
       // describing the current locking context -- whether this thread owns
       // the Heap_lock, whether we are at a safepoint, and whether this is
       // the VM thread.
 303 #define heap_locking_asserts_params(_extra_message_)                          \
 304   "%s : Heap_lock locked: %s, at safepoint: %s, is VM thread: %s",            \
 305   (_extra_message_),                                                          \
 306   BOOL_TO_STR(Heap_lock->owned_by_self()),                                    \
 307   BOOL_TO_STR(SafepointSynchronize::is_at_safepoint()),                       \
 308   BOOL_TO_STR(Thread::current()->is_VM_thread())
 309 
       // Asserts that the calling thread currently owns the Heap_lock.
 310 #define assert_heap_locked()                                                  \
 311   do {                                                                        \
 312     assert(Heap_lock->owned_by_self(),                                        \
 313            heap_locking_asserts_params("should be holding the Heap_lock"));   \
 314   } while (0)


< prev index next >