src/share/vm/gc/g1/g1CollectedHeap.hpp

Old version:

 270   // this method will be found dead by the marking cycle).
 271   void allocate_dummy_regions() PRODUCT_RETURN;
 272 
 273   // Clear RSets after a compaction. It also resets the GC time stamps.
 274   void clear_rsets_post_compaction();
 275 
 276   // If the HR printer is active, dump the state of the regions in the
 277   // heap after a compaction.
 278   void print_hrm_post_compaction();
 279 
 280   // Create a memory mapper for auxiliary data structures of the given size and
 281   // translation factor.
 282   static G1RegionToSpaceMapper* create_aux_memory_mapper(const char* description,
 283                                                          size_t size,
 284                                                          size_t translation_factor);
 285 
 286   double verify(bool guard, const char* msg);
 287   void verify_before_gc();
 288   void verify_after_gc();
 289 
 290   void log_gc_header();
 291   void log_gc_footer(double pause_time_sec);
 292 
 293   void trace_heap(GCWhen::Type when, const GCTracer* tracer);
 294 
 295   // These are macros so that, if the assert fires, we get the correct
 296   // line number, file, etc.
 297 
 298 #define heap_locking_asserts_params(_extra_message_)                          \
 299   "%s : Heap_lock locked: %s, at safepoint: %s, is VM thread: %s",            \
 300   (_extra_message_),                                                          \
 301   BOOL_TO_STR(Heap_lock->owned_by_self()),                                    \
 302   BOOL_TO_STR(SafepointSynchronize::is_at_safepoint()),                       \
 303   BOOL_TO_STR(Thread::current()->is_VM_thread())
 304 
 305 #define assert_heap_locked()                                                  \
 306   do {                                                                        \
 307     assert(Heap_lock->owned_by_self(),                                        \
 308            heap_locking_asserts_params("should be holding the Heap_lock"));   \
 309   } while (0)
 310 
 311 #define assert_heap_locked_or_at_safepoint(_should_be_vm_thread_)             \


 683 
 684   // Insert any required filler objects in the G1 regions around the specified
 685   // ranges to make the regions parseable. This must be called after
 686   // alloc_archive_regions, and after class loading has occurred.
 687   void fill_archive_regions(MemRegion* range, size_t count);
 688 
 689   // For each of the specified MemRegions, uncommit the containing G1 regions
 690   // which had been allocated by alloc_archive_regions. This should be called
 691   // rather than fill_archive_regions at JVM init time if the archive file
 692   // mapping failed, with the same non-overlapping and sorted MemRegion array.
 693   void dealloc_archive_regions(MemRegion* range, size_t count);
 694 
 695 protected:
 696 
 697   // Shrink the garbage-first heap by at most the given size (in bytes!).
 698   // (Rounds down to a HeapRegion boundary.)
 699   virtual void shrink(size_t expand_bytes);
 700   void shrink_helper(size_t expand_bytes);
 701 
 702   #if TASKQUEUE_STATS
 703   static void print_taskqueue_stats_hdr(outputStream* const st = gclog_or_tty);
 704   void print_taskqueue_stats(outputStream* const st = gclog_or_tty) const;
 705   void reset_taskqueue_stats();
 706   #endif // TASKQUEUE_STATS
 707 
 708   // Schedule the VM operation that will do an evacuation pause to
 709   // satisfy an allocation request of word_size. *succeeded will
 710   // return whether the VM operation was successful (it did do an
 711   // evacuation pause) or not (another thread beat us to it or the GC
 712   // locker was active). Given that we should not be holding the
 713   // Heap_lock when we enter this method, we will pass the
 714   // gc_count_before (i.e., total_collections()) as a parameter since
 715   // it has to be read while holding the Heap_lock. Currently, both
 716   // methods that call do_collection_pause() release the Heap_lock
 717   // before the call, so it's easy to read gc_count_before just before.
 718   HeapWord* do_collection_pause(size_t         word_size,
 719                                 uint           gc_count_before,
 720                                 bool*          succeeded,
 721                                 GCCause::Cause gc_cause);
 722 
 723   void wait_for_root_region_scanning();
 724 
 725   // The guts of the incremental collection pause, executed by the vm
 726   // thread. It returns false if it is unable to do the collection due
 727   // to the GC locker being active, true otherwise
 728   bool do_collection_pause_at_safepoint(double target_pause_time_ms);
 729 
 730   // Actually do the work of evacuating the collection set.
 731   virtual void evacuate_collection_set(EvacuationInfo& evacuation_info, G1ParScanThreadStateSet* per_thread_states);
 732 
 733   void pre_evacuate_collection_set();
 734   void post_evacuate_collection_set(EvacuationInfo& evacuation_info, G1ParScanThreadStateSet* pss);
 735 
 736   // Print the header for the per-thread termination statistics.
 737   static void print_termination_stats_hdr(outputStream* const st);
 738   // Print actual per-thread termination statistics.
 739   void print_termination_stats(outputStream* const st,
 740                                uint worker_id,
 741                                double elapsed_ms,
 742                                double strong_roots_ms,
 743                                double term_ms,
 744                                size_t term_attempts,
 745                                size_t alloc_buffer_waste,
 746                                size_t undo_waste) const;
 747   // Update object copying statistics.
 748   void record_obj_copy_mem_stats();
 749 
 750   // The g1 remembered set of the heap.
 751   G1RemSet* _g1_rem_set;
 752 
 753   // A set of cards that cover the objects for which the Rsets should be updated
 754   // concurrently after the collection.
 755   DirtyCardQueueSet _dirty_card_queue_set;
 756 
 757   // The closure used to refine a single card.
 758   RefineCardTableEntryClosure* _refine_cte_cl;
 759 
 760   // A DirtyCardQueueSet that is used to hold cards that contain


1490   // Verification
1491 
1492   // Perform any cleanup actions necessary before allowing a verification.
1493   virtual void prepare_for_verify();
1494 
1495   // Perform verification.
1496 
1497   // vo == UsePrevMarking  -> use "prev" marking information,
1498   // vo == UseNextMarking -> use "next" marking information
1499   // vo == UseMarkWord    -> use the mark word in the object header
1500   //
1501   // NOTE: Only the "prev" marking information is guaranteed to be
1502   // consistent most of the time, so most calls to this should use
1503   // vo == UsePrevMarking.
1504   // Currently, there is only one case where this is called with
1505   // vo == UseNextMarking, which is to verify the "next" marking
1506   // information at the end of remark.
1507   // Currently there is only one place where this is called with
1508   // vo == UseMarkWord, which is to verify the marking during a
1509   // full GC.
1510   void verify(bool silent, VerifyOption vo);
1511 
1512   // Override; it uses the "prev" marking information
1513   virtual void verify(bool silent);
1514 
1515   // The methods below are here for convenience and dispatch the
1516   // appropriate method depending on value of the given VerifyOption
1517   // parameter. The values for that parameter, and their meanings,
1518   // are the same as those above.
1519 
1520   bool is_obj_dead_cond(const oop obj,
1521                         const HeapRegion* hr,
1522                         const VerifyOption vo) const;
1523 
1524   bool is_obj_dead_cond(const oop obj,
1525                         const VerifyOption vo) const;
1526 
1527   G1HeapSummary create_g1_heap_summary();
1528   G1EvacSummary create_g1_evac_summary(G1EvacStats* stats);
1529 
1530   // Printing
1531 
1532   virtual void print_on(outputStream* st) const;
 1533   virtual void print_extended_on(outputStream* st) const;

New version:

 270   // this method will be found dead by the marking cycle).
 271   void allocate_dummy_regions() PRODUCT_RETURN;
 272 
 273   // Clear RSets after a compaction. It also resets the GC time stamps.
 274   void clear_rsets_post_compaction();
 275 
 276   // If the HR printer is active, dump the state of the regions in the
 277   // heap after a compaction.
 278   void print_hrm_post_compaction();
 279 
 280   // Create a memory mapper for auxiliary data structures of the given size and
 281   // translation factor.
 282   static G1RegionToSpaceMapper* create_aux_memory_mapper(const char* description,
 283                                                          size_t size,
 284                                                          size_t translation_factor);
 285 
 286   double verify(bool guard, const char* msg);
 287   void verify_before_gc();
 288   void verify_after_gc();
 289 
 290   void log_gc_footer(double pause_time_counter);
 291 
 292   void trace_heap(GCWhen::Type when, const GCTracer* tracer);
 293 
 294   // These are macros so that, if the assert fires, we get the correct
 295   // line number, file, etc.
 296 
 297 #define heap_locking_asserts_params(_extra_message_)                          \
 298   "%s : Heap_lock locked: %s, at safepoint: %s, is VM thread: %s",            \
 299   (_extra_message_),                                                          \
 300   BOOL_TO_STR(Heap_lock->owned_by_self()),                                    \
 301   BOOL_TO_STR(SafepointSynchronize::is_at_safepoint()),                       \
 302   BOOL_TO_STR(Thread::current()->is_VM_thread())
 303 
 304 #define assert_heap_locked()                                                  \
 305   do {                                                                        \
 306     assert(Heap_lock->owned_by_self(),                                        \
 307            heap_locking_asserts_params("should be holding the Heap_lock"));   \
 308   } while (0)
 309 
 310 #define assert_heap_locked_or_at_safepoint(_should_be_vm_thread_)             \
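
[Reviewer note, not part of the webrev] The comment above says these checks are macros so that a firing assert reports the correct file and line. A minimal standalone sketch of that difference, using plain C++ stand-ins (heap_lock_owned, check_heap_locked) rather than HotSpot's Heap_lock and assert():

    #include <cassert>
    #include <cstdio>

    static bool heap_lock_owned = false;  // stand-in for Heap_lock->owned_by_self()

    // Function form: a failing assert reports this helper's file and line,
    // hiding which caller actually violated the locking protocol.
    static void check_heap_locked_fn() {
      assert(heap_lock_owned && "should be holding the Heap_lock");
    }

    // Macro form: the assert expands at each use, so a failure names the
    // offending call site. This is the property the comment refers to.
    #define check_heap_locked() \
      do { assert(heap_lock_owned && "should be holding the Heap_lock"); } while (0)

    int main() {
      heap_lock_owned = true;
      check_heap_locked();     // a failure here would name this line
      check_heap_locked_fn();  // a failure here would name the helper
      std::puts("locking checks passed");
      return 0;
    }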


 682 
 683   // Insert any required filler objects in the G1 regions around the specified
 684   // ranges to make the regions parseable. This must be called after
 685   // alloc_archive_regions, and after class loading has occurred.
 686   void fill_archive_regions(MemRegion* range, size_t count);
 687 
 688   // For each of the specified MemRegions, uncommit the containing G1 regions
 689   // which had been allocated by alloc_archive_regions. This should be called
 690   // rather than fill_archive_regions at JVM init time if the archive file
 691   // mapping failed, with the same non-overlapping and sorted MemRegion array.
 692   void dealloc_archive_regions(MemRegion* range, size_t count);
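
[Reviewer note, not part of the webrev] A sketch of the calling protocol the two comments above describe: fill the regions on success, dealloc them if the archive mapping failed, passing the same non-overlapping and sorted array throughout. All names below are hypothetical stubs, not the real G1/CDS entry points:

    #include <cstddef>

    struct MemRegion {};  // stand-in for HotSpot's MemRegion

    // Hypothetical stubs for the G1 methods and the archive mapping step.
    static void alloc_archive_regions_stub(MemRegion*, size_t)   {}
    static bool map_archive_file_stub()                          { return true; }
    static void fill_archive_regions_stub(MemRegion*, size_t)    {}
    static void dealloc_archive_regions_stub(MemRegion*, size_t) {}

    static void init_archive(MemRegion* ranges, size_t count) {
      alloc_archive_regions_stub(ranges, count);
      if (map_archive_file_stub()) {
        // Mapping (and class loading) succeeded: insert fillers so the
        // surrounding regions are parseable.
        fill_archive_regions_stub(ranges, count);
      } else {
        // Mapping failed at JVM init: uncommit instead of filling.
        dealloc_archive_regions_stub(ranges, count);
      }
    }

    int main() {
      MemRegion ranges[2];  // must be non-overlapping and sorted
      init_archive(ranges, 2);
      return 0;
    }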
 693 
 694 protected:
 695 
 696   // Shrink the garbage-first heap by at most the given size (in bytes!).
 697   // (Rounds down to a HeapRegion boundary.)
 698   virtual void shrink(size_t expand_bytes);
 699   void shrink_helper(size_t expand_bytes);
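
[Reviewer note, not part of the webrev] The "(Rounds down to a HeapRegion boundary.)" remark, worked through with an assumed 1 MB region size; round_down_to_region is a hypothetical stand-in for the alignment done in the real shrink path:

    #include <cstddef>
    #include <cstdio>

    static const size_t region_size = 1024 * 1024;  // assumed grain size

    static size_t round_down_to_region(size_t shrink_bytes) {
      return shrink_bytes - (shrink_bytes % region_size);
    }

    int main() {
      // Asking to shrink by 2.5 regions actually shrinks by 2 regions.
      std::printf("%zu\n", round_down_to_region(5 * region_size / 2));
      return 0;
    }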
 700 
 701   #if TASKQUEUE_STATS
 702   static void print_taskqueue_stats_hdr(outputStream* const st);
 703   void print_taskqueue_stats() const;
 704   void reset_taskqueue_stats();
 705   #endif // TASKQUEUE_STATS
 706 
 707   // Schedule the VM operation that will do an evacuation pause to
 708   // satisfy an allocation request of word_size. *succeeded will
 709   // return whether the VM operation was successful (it did do an
 710   // evacuation pause) or not (another thread beat us to it or the GC
 711   // locker was active). Given that we should not be holding the
 712   // Heap_lock when we enter this method, we will pass the
 713   // gc_count_before (i.e., total_collections()) as a parameter since
 714   // it has to be read while holding the Heap_lock. Currently, both
 715   // methods that call do_collection_pause() release the Heap_lock
 716   // before the call, so it's easy to read gc_count_before just before.
 717   HeapWord* do_collection_pause(size_t         word_size,
 718                                 uint           gc_count_before,
 719                                 bool*          succeeded,
 720                                 GCCause::Cause gc_cause);
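
[Reviewer note, not part of the webrev] A sketch of the Heap_lock protocol the do_collection_pause() comment describes: read the count under the lock, release it, then schedule the pause. std::mutex, collection_count, and the stub function are stand-ins for Heap_lock, total_collections(), and the VM operation, not HotSpot code:

    #include <cstddef>
    #include <mutex>

    static std::mutex heap_lock;           // stand-in for Heap_lock
    static unsigned collection_count = 0;  // stand-in for total_collections()

    // The scheduled VM operation compares gc_count_before with the current
    // count; if they differ, another thread's pause already ran and
    // *succeeded is set to false.
    static void* do_collection_pause_stub(size_t /*word_size*/,
                                          unsigned gc_count_before,
                                          bool* succeeded) {
      *succeeded = (gc_count_before == collection_count);
      if (*succeeded) ++collection_count;  // the pause "ran"
      return nullptr;                      // no allocation result in this sketch
    }

    static void* allocate_slow(size_t word_size) {
      unsigned gc_count_before;
      {
        // gc_count_before must be read while holding the Heap_lock...
        std::lock_guard<std::mutex> hl(heap_lock);
        gc_count_before = collection_count;
      } // ...and the lock released before scheduling the pause.
      bool succeeded = false;
      void* result = do_collection_pause_stub(word_size, gc_count_before, &succeeded);
      // On failure the real caller retries: another thread beat us to the
      // pause, or the GC locker was active.
      return succeeded ? result : nullptr;
    }

    int main() {
      allocate_slow(64);
      return 0;
    }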
 721 
 722   void wait_for_root_region_scanning();
 723 
 724   // The guts of the incremental collection pause, executed by the vm
 725   // thread. It returns false if it is unable to do the collection due
 726   // to the GC locker being active, true otherwise
 727   bool do_collection_pause_at_safepoint(double target_pause_time_ms);
 728 
 729   // Actually do the work of evacuating the collection set.
 730   virtual void evacuate_collection_set(EvacuationInfo& evacuation_info, G1ParScanThreadStateSet* per_thread_states);
 731 
 732   void pre_evacuate_collection_set();
 733   void post_evacuate_collection_set(EvacuationInfo& evacuation_info, G1ParScanThreadStateSet* pss);
 734 
 735   // Print the header for the per-thread termination statistics.
 736   static void print_termination_stats_hdr();
 737   // Print actual per-thread termination statistics.
 738   void print_termination_stats(uint worker_id,
 739                                double elapsed_ms,
 740                                double strong_roots_ms,
 741                                double term_ms,
 742                                size_t term_attempts,
 743                                size_t alloc_buffer_waste,
 744                                size_t undo_waste) const;
 745   // Update object copying statistics.
 746   void record_obj_copy_mem_stats();
 747 
 748   // The g1 remembered set of the heap.
 749   G1RemSet* _g1_rem_set;
 750 
 751   // A set of cards that cover the objects for which the Rsets should be updated
 752   // concurrently after the collection.
 753   DirtyCardQueueSet _dirty_card_queue_set;
 754 
 755   // The closure used to refine a single card.
 756   RefineCardTableEntryClosure* _refine_cte_cl;
 757 
 758   // A DirtyCardQueueSet that is used to hold cards that contain


1488   // Verification
1489 
1490   // Perform any cleanup actions necessary before allowing a verification.
1491   virtual void prepare_for_verify();
1492 
1493   // Perform verification.
1494 
1495   // vo == UsePrevMarking  -> use "prev" marking information,
1496   // vo == UseNextMarking -> use "next" marking information
1497   // vo == UseMarkWord    -> use the mark word in the object header
1498   //
1499   // NOTE: Only the "prev" marking information is guaranteed to be
1500   // consistent most of the time, so most calls to this should use
1501   // vo == UsePrevMarking.
1502   // Currently, there is only one case where this is called with
1503   // vo == UseNextMarking, which is to verify the "next" marking
1504   // information at the end of remark.
1505   // Currently there is only one place where this is called with
1506   // vo == UseMarkWord, which is to verify the marking during a
1507   // full GC.
 1508   void verify(VerifyOption vo);
 1509 
1510   // The methods below are here for convenience and dispatch the
1511   // appropriate method depending on value of the given VerifyOption
1512   // parameter. The values for that parameter, and their meanings,
1513   // are the same as those above.
1514 
1515   bool is_obj_dead_cond(const oop obj,
1516                         const HeapRegion* hr,
1517                         const VerifyOption vo) const;
1518 
1519   bool is_obj_dead_cond(const oop obj,
1520                         const VerifyOption vo) const;
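
[Reviewer note, not part of the webrev] A sketch of how a VerifyOption dispatch such as is_obj_dead_cond() typically reads, matching the comment above: "prev" marking as the safe default, "next" marking only at the end of remark, the mark word only during full GC. The enum and fields are illustrative stand-ins, not the HotSpot definitions:

    enum VerifyOpt { UsePrevMarking, UseNextMarking, UseMarkWord };

    struct ObjInfo {
      bool live_in_prev_marking;  // "prev" bitmap: consistent most of the time
      bool live_in_next_marking;  // "next" bitmap: valid at end of remark
      bool marked_in_header;      // mark word: meaningful during full GC
    };

    static bool is_obj_dead_cond_stub(const ObjInfo& o, VerifyOpt vo) {
      switch (vo) {
        case UsePrevMarking: return !o.live_in_prev_marking;
        case UseNextMarking: return !o.live_in_next_marking;
        case UseMarkWord:    return !o.marked_in_header;
      }
      return false;  // unreachable
    }

    int main() {
      ObjInfo o = { true, false, false };
      return is_obj_dead_cond_stub(o, UsePrevMarking) ? 1 : 0;
    }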
1521 
1522   G1HeapSummary create_g1_heap_summary();
1523   G1EvacSummary create_g1_evac_summary(G1EvacStats* stats);
1524 
1525   // Printing
1526 
1527   virtual void print_on(outputStream* st) const;
1528   virtual void print_extended_on(outputStream* st) const;