
src/share/vm/gc/g1/g1CollectedHeap.hpp

rev 8978 : imported patch remove_err_msg

--- old version (before the remove_err_msg patch) ---

 351   void print_hrm_post_compaction();
 352 
 353   // Create a memory mapper for auxiliary data structures of the given size and
 354   // translation factor.
 355   static G1RegionToSpaceMapper* create_aux_memory_mapper(const char* description,
 356                                                          size_t size,
 357                                                          size_t translation_factor);
 358 
 359   double verify(bool guard, const char* msg);
 360   void verify_before_gc();
 361   void verify_after_gc();
 362 
 363   void log_gc_header();
 364   void log_gc_footer(double pause_time_sec);
 365 
 366   void trace_heap(GCWhen::Type when, const GCTracer* tracer);
 367 
 368   // These are macros so that, if the assert fires, we get the correct
 369   // line number, file, etc.
 370 
 371 #define heap_locking_asserts_err_msg(_extra_message_)                         \
 372   err_msg("%s : Heap_lock locked: %s, at safepoint: %s, is VM thread: %s",    \
 373           (_extra_message_),                                                  \
 374           BOOL_TO_STR(Heap_lock->owned_by_self()),                            \
 375           BOOL_TO_STR(SafepointSynchronize::is_at_safepoint()),               \
 376           BOOL_TO_STR(Thread::current()->is_VM_thread()))
 377 
 378 #define assert_heap_locked()                                                  \
 379   do {                                                                        \
 380     assert(Heap_lock->owned_by_self(),                                        \
 381            heap_locking_asserts_err_msg("should be holding the Heap_lock"));  \
 382   } while (0)
 383 
 384 #define assert_heap_locked_or_at_safepoint(_should_be_vm_thread_)             \
 385   do {                                                                        \
 386     assert(Heap_lock->owned_by_self() ||                                      \
 387            (SafepointSynchronize::is_at_safepoint() &&                        \
 388              ((_should_be_vm_thread_) == Thread::current()->is_VM_thread())), \
 389            heap_locking_asserts_err_msg("should be holding the Heap_lock or " \
 390                                         "should be at a safepoint"));         \
 391   } while (0)
 392 
 393 #define assert_heap_locked_and_not_at_safepoint()                             \
 394   do {                                                                        \
 395     assert(Heap_lock->owned_by_self() &&                                      \
 396                                     !SafepointSynchronize::is_at_safepoint(), \
 397           heap_locking_asserts_err_msg("should be holding the Heap_lock and " \
 398                                        "should not be at a safepoint"));      \
 399   } while (0)
 400 
 401 #define assert_heap_not_locked()                                              \
 402   do {                                                                        \
 403     assert(!Heap_lock->owned_by_self(),                                       \
 404         heap_locking_asserts_err_msg("should not be holding the Heap_lock")); \
 405   } while (0)
 406 
 407 #define assert_heap_not_locked_and_not_at_safepoint()                         \
 408   do {                                                                        \
 409     assert(!Heap_lock->owned_by_self() &&                                     \
 410                                     !SafepointSynchronize::is_at_safepoint(), \
 411       heap_locking_asserts_err_msg("should not be holding the Heap_lock and " \
 412                                    "should not be at a safepoint"));          \
 413   } while (0)
 414 
 415 #define assert_at_safepoint(_should_be_vm_thread_)                            \
 416   do {                                                                        \
 417     assert(SafepointSynchronize::is_at_safepoint() &&                         \
 418               ((_should_be_vm_thread_) == Thread::current()->is_VM_thread()), \
 419            heap_locking_asserts_err_msg("should be at a safepoint"));         \
 420   } while (0)
 421 
 422 #define assert_not_at_safepoint()                                             \
 423   do {                                                                        \
 424     assert(!SafepointSynchronize::is_at_safepoint(),                          \
 425            heap_locking_asserts_err_msg("should not be at a safepoint"));     \
 426   } while (0)
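// NOTE: illustrative expansion only, not part of g1CollectedHeap.hpp. Substituting
// the err_msg-based helper above into assert_heap_locked() shows what the assert
// ultimately receives: err_msg() builds the formatted message object that is then
// handed to the assert machinery (the do/while wrapper is omitted here):
//
//   assert(Heap_lock->owned_by_self(),
//          err_msg("%s : Heap_lock locked: %s, at safepoint: %s, is VM thread: %s",
//                  "should be holding the Heap_lock",
//                  BOOL_TO_STR(Heap_lock->owned_by_self()),
//                  BOOL_TO_STR(SafepointSynchronize::is_at_safepoint()),
//                  BOOL_TO_STR(Thread::current()->is_VM_thread())));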
 427 
 428 protected:
 429 
 430   // The young region list.
 431   YoungList*  _young_list;
 432 
 433   // The current policy object for the collector.
 434   G1CollectorPolicy* _g1_policy;
 435 
 436   // This is the second level of trying to allocate a new region. If
 437   // new_region() didn't find a region on the free_list, this call will
 438   // check whether there's anything available on the
 439   // secondary_free_list and/or wait for more regions to appear on
 440   // that list, if _free_regions_coming is set.
 441   HeapRegion* new_region_try_secondary_free_list(bool is_old);
 442 
 443   // Try to allocate a single non-humongous HeapRegion sufficient for
 444   // an allocation of the given word_size. If do_expand is true,
 445   // attempt to expand the heap if necessary to satisfy the allocation


--- new version (with the remove_err_msg patch applied) ---

 351   void print_hrm_post_compaction();
 352 
 353   // Create a memory mapper for auxiliary data structures of the given size and
 354   // translation factor.
 355   static G1RegionToSpaceMapper* create_aux_memory_mapper(const char* description,
 356                                                          size_t size,
 357                                                          size_t translation_factor);
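// NOTE: illustrative call only, not part of g1CollectedHeap.hpp. The translation
// factor is taken here to mean "heap bytes covered per byte of the auxiliary
// structure"; the name, size and factor below are hypothetical and the real call
// sites (in g1CollectedHeap.cpp) may differ:
//
//   G1RegionToSpaceMapper* card_table_storage =
//     create_aux_memory_mapper("Card Table",              // description string
//                              reserved_heap_bytes / 512, // size of the auxiliary structure
//                              512);                      // translation factor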
 358 
 359   double verify(bool guard, const char* msg);
 360   void verify_before_gc();
 361   void verify_after_gc();
 362 
 363   void log_gc_header();
 364   void log_gc_footer(double pause_time_sec);
 365 
 366   void trace_heap(GCWhen::Type when, const GCTracer* tracer);
 367 
 368   // These are macros so that, if the assert fires, we get the correct
 369   // line number, file, etc.
 370 
 371 #define heap_locking_asserts_params(_extra_message_)                          \
 372   "%s : Heap_lock locked: %s, at safepoint: %s, is VM thread: %s",            \
 373   (_extra_message_),                                                          \
 374   BOOL_TO_STR(Heap_lock->owned_by_self()),                                    \
 375   BOOL_TO_STR(SafepointSynchronize::is_at_safepoint()),                       \
 376   BOOL_TO_STR(Thread::current()->is_VM_thread())
 377 
 378 #define assert_heap_locked()                                                  \
 379   do {                                                                        \
 380     assert(Heap_lock->owned_by_self(),                                        \
 381            heap_locking_asserts_params("should be holding the Heap_lock"));   \
 382   } while (0)
 383 
 384 #define assert_heap_locked_or_at_safepoint(_should_be_vm_thread_)             \
 385   do {                                                                        \
 386     assert(Heap_lock->owned_by_self() ||                                      \
 387            (SafepointSynchronize::is_at_safepoint() &&                        \
 388              ((_should_be_vm_thread_) == Thread::current()->is_VM_thread())), \
 389            heap_locking_asserts_params("should be holding the Heap_lock or "  \
 390                                         "should be at a safepoint"));         \
 391   } while (0)
 392 
 393 #define assert_heap_locked_and_not_at_safepoint()                             \
 394   do {                                                                        \
 395     assert(Heap_lock->owned_by_self() &&                                      \
 396                                     !SafepointSynchronize::is_at_safepoint(), \
 397           heap_locking_asserts_params("should be holding the Heap_lock and "  \
 398                                        "should not be at a safepoint"));      \
 399   } while (0)
 400 
 401 #define assert_heap_not_locked()                                              \
 402   do {                                                                        \
 403     assert(!Heap_lock->owned_by_self(),                                       \
 404         heap_locking_asserts_params("should not be holding the Heap_lock"));  \
 405   } while (0)
 406 
 407 #define assert_heap_not_locked_and_not_at_safepoint()                         \
 408   do {                                                                        \
 409     assert(!Heap_lock->owned_by_self() &&                                     \
 410                                     !SafepointSynchronize::is_at_safepoint(), \
 411       heap_locking_asserts_params("should not be holding the Heap_lock and "  \
 412                                    "should not be at a safepoint"));          \
 413   } while (0)
 414 
 415 #define assert_at_safepoint(_should_be_vm_thread_)                            \
 416   do {                                                                        \
 417     assert(SafepointSynchronize::is_at_safepoint() &&                         \
 418               ((_should_be_vm_thread_) == Thread::current()->is_VM_thread()), \
 419            heap_locking_asserts_params("should be at a safepoint"));          \
 420   } while (0)
 421 
 422 #define assert_not_at_safepoint()                                             \
 423   do {                                                                        \
 424     assert(!SafepointSynchronize::is_at_safepoint(),                          \
 425            heap_locking_asserts_params("should not be at a safepoint"));      \
 426   } while (0)
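// NOTE: illustrative expansion only, not part of g1CollectedHeap.hpp. With the
// params-based helper above, the format string and its arguments are passed straight
// through to assert(), which is assumed here to accept printf-style variadic
// arguments. A failing assert_heap_locked() therefore essentially expands to
// (do/while wrapper omitted):
//
//   assert(Heap_lock->owned_by_self(),
//          "%s : Heap_lock locked: %s, at safepoint: %s, is VM thread: %s",
//          "should be holding the Heap_lock",
//          BOOL_TO_STR(Heap_lock->owned_by_self()),
//          BOOL_TO_STR(SafepointSynchronize::is_at_safepoint()),
//          BOOL_TO_STR(Thread::current()->is_VM_thread()));
//
// The message content is unchanged relative to the old version; only the intermediate
// err_msg() wrapper is gone.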
 427 
 428 protected:
 429 
 430   // The young region list.
 431   YoungList*  _young_list;
 432 
 433   // The current policy object for the collector.
 434   G1CollectorPolicy* _g1_policy;
 435 
 436   // This is the second level of trying to allocate a new region. If
 437   // new_region() didn't find a region on the free_list, this call will
 438   // check whether there's anything available on the
 439   // secondary_free_list and/or wait for more regions to appear on
 440   // that list, if _free_regions_coming is set.
 441   HeapRegion* new_region_try_secondary_free_list(bool is_old);
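// NOTE: illustrative sketch only, not part of g1CollectedHeap.hpp. A minimal outline
// of the control flow the comment above describes; the helper names are hypothetical
// and the real implementation (in g1CollectedHeap.cpp) differs in its locking and
// bookkeeping details:
//
//   HeapRegion* G1CollectedHeap::new_region_try_secondary_free_list(bool is_old) {
//     for (;;) {
//       if (!_secondary_free_list.is_empty()) {
//         // A region has already been handed back: take one from the secondary list.
//         return take_region_from_secondary_free_list(is_old);  // hypothetical helper
//       }
//       if (!_free_regions_coming) {
//         // Nothing available and no more regions expected: give up.
//         return NULL;
//       }
//       // More regions are still on their way; wait and then retry.
//       wait_for_secondary_free_regions();                      // hypothetical helper
//     }
//   }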
 442 
 443   // Try to allocate a single non-humongous HeapRegion sufficient for
 444   // an allocation of the given word_size. If do_expand is true,
 445   // attempt to expand the heap if necessary to satisfy the allocation

