
src/share/vm/gc/g1/g1CollectedHeap.hpp

 270   // this method will be found dead by the marking cycle).
 271   void allocate_dummy_regions() PRODUCT_RETURN;
 272 
 273   // Clear RSets after a compaction. It also resets the GC time stamps.
 274   void clear_rsets_post_compaction();
 275 
 276   // If the HR printer is active, dump the state of the regions in the
 277   // heap after a compaction.
 278   void print_hrm_post_compaction();
 279 
 280   // Create a memory mapper for auxiliary data structures of the given size and
 281   // translation factor.
 282   static G1RegionToSpaceMapper* create_aux_memory_mapper(const char* description,
 283                                                          size_t size,
 284                                                          size_t translation_factor);
 285 
 286   double verify(bool guard, const char* msg);
 287   void verify_before_gc();
 288   void verify_after_gc();
 289 
 290   void log_gc_header();
 291   void log_gc_footer(double pause_time_sec);
 292 
 293   void trace_heap(GCWhen::Type when, const GCTracer* tracer);
 294 
 295   // These are macros so that, if the assert fires, we get the correct
 296   // line number, file, etc.
 297 
 298 #define heap_locking_asserts_params(_extra_message_)                          \
 299   "%s : Heap_lock locked: %s, at safepoint: %s, is VM thread: %s",            \
 300   (_extra_message_),                                                          \
 301   BOOL_TO_STR(Heap_lock->owned_by_self()),                                    \
 302   BOOL_TO_STR(SafepointSynchronize::is_at_safepoint()),                       \
 303   BOOL_TO_STR(Thread::current()->is_VM_thread())
 304 
 305 #define assert_heap_locked()                                                  \
 306   do {                                                                        \
 307     assert(Heap_lock->owned_by_self(),                                        \
 308            heap_locking_asserts_params("should be holding the Heap_lock"));   \
 309   } while (0)
 310 
 311 #define assert_heap_locked_or_at_safepoint(_should_be_vm_thread_)             \


 683 
 684   // Insert any required filler objects in the G1 regions around the specified
 685   // ranges to make the regions parseable. This must be called after
 686   // alloc_archive_regions, and after class loading has occurred.
 687   void fill_archive_regions(MemRegion* range, size_t count);
 688 
 689   // For each of the specified MemRegions, uncommit the containing G1 regions
 690   // which had been allocated by alloc_archive_regions. This should be called
 691   // rather than fill_archive_regions at JVM init time if the archive file
 692   // mapping failed, with the same non-overlapping and sorted MemRegion array.
 693   void dealloc_archive_regions(MemRegion* range, size_t count);
 694 
 695 protected:
 696 
 697   // Shrink the garbage-first heap by at most the given size (in bytes!).
 698   // (Rounds down to a HeapRegion boundary.)
 699   virtual void shrink(size_t expand_bytes);
 700   void shrink_helper(size_t expand_bytes);
 701 
 702   #if TASKQUEUE_STATS
 703   static void print_taskqueue_stats_hdr(outputStream* const st = gclog_or_tty);
 704   void print_taskqueue_stats(outputStream* const st = gclog_or_tty) const;
 705   void reset_taskqueue_stats();
 706   #endif // TASKQUEUE_STATS
 707 
 708   // Schedule the VM operation that will do an evacuation pause to
 709   // satisfy an allocation request of word_size. *succeeded will
 710   // return whether the VM operation was successful (it did do an
 711   // evacuation pause) or not (another thread beat us to it or the GC
 712   // locker was active). Given that we should not be holding the
 713   // Heap_lock when we enter this method, we will pass the
 714   // gc_count_before (i.e., total_collections()) as a parameter since
 715   // it has to be read while holding the Heap_lock. Currently, both
 716   // methods that call do_collection_pause() release the Heap_lock
 717   // before the call, so it's easy to read gc_count_before just before.
 718   HeapWord* do_collection_pause(size_t         word_size,
 719                                 uint           gc_count_before,
 720                                 bool*          succeeded,
 721                                 GCCause::Cause gc_cause);
 722 
 723   void wait_for_root_region_scanning();
 724 
 725   // The guts of the incremental collection pause, executed by the VM
 726   // thread. It returns false if it is unable to do the collection due
 727   // to the GC locker being active, true otherwise.
 728   bool do_collection_pause_at_safepoint(double target_pause_time_ms);
 729 
 730   // Actually do the work of evacuating the collection set.
 731   virtual void evacuate_collection_set(EvacuationInfo& evacuation_info, G1ParScanThreadStateSet* per_thread_states);
 732 
 733   void pre_evacuate_collection_set();
 734   void post_evacuate_collection_set(EvacuationInfo& evacuation_info, G1ParScanThreadStateSet* pss);
 735 
 736   // Print the header for the per-thread termination statistics.
 737   static void print_termination_stats_hdr(outputStream* const st);
 738   // Print actual per-thread termination statistics.
 739   void print_termination_stats(outputStream* const st,
 740                                uint worker_id,
 741                                double elapsed_ms,
 742                                double strong_roots_ms,
 743                                double term_ms,
 744                                size_t term_attempts,
 745                                size_t alloc_buffer_waste,
 746                                size_t undo_waste) const;
 747   // Update object copying statistics.
 748   void record_obj_copy_mem_stats();
 749 
 750   // The g1 remembered set of the heap.
 751   G1RemSet* _g1_rem_set;
 752 
 753   // A set of cards that cover the objects for which the Rsets should be updated
 754   // concurrently after the collection.
 755   DirtyCardQueueSet _dirty_card_queue_set;
 756 
 757   // The closure used to refine a single card.
 758   RefineCardTableEntryClosure* _refine_cte_cl;
 759 
 760   // After a collection pause, make the regions in the CS into free


 954   // Initialize the G1CollectedHeap to have the initial and
 955   // maximum sizes and remembered and barrier sets
 956   // specified by the policy object.
 957   jint initialize();
 958 
 959   virtual void stop();
 960 
 961   // Return the (conservative) maximum heap alignment for any G1 heap
 962   static size_t conservative_max_heap_alignment();
 963 
 964   // Performs operations required after initialization has been done.
 965   void post_initialize();
 966 
 967   // Initialize weak reference processing.
 968   void ref_processing_init();
 969 
 970   virtual Name kind() const {
 971     return CollectedHeap::G1CollectedHeap;
 972   }
 973 
 974   const G1CollectorState* collector_state() const { return &_collector_state; }
 975   G1CollectorState* collector_state() { return &_collector_state; }
 976 
 977   // The current policy object for the collector.
 978   G1CollectorPolicy* g1_policy() const { return _g1_policy; }
 979 
 980   virtual CollectorPolicy* collector_policy() const;
 981 
 982   // Adaptive size policy.  No such thing for g1.
 983   virtual AdaptiveSizePolicy* size_policy() { return NULL; }
 984 
 985   // The rem set and barrier set.
 986   G1RemSet* g1_rem_set() const { return _g1_rem_set; }
 987 
 988   unsigned get_gc_time_stamp() {
 989     return _gc_time_stamp;
 990   }
 991 
 992   inline void reset_gc_time_stamp();
 993 


1475   // Verification
1476 
1477   // Perform any cleanup actions necessary before allowing a verification.
1478   virtual void prepare_for_verify();
1479 
1480   // Perform verification.
1481 
 1482   // vo == UsePrevMarking -> use "prev" marking information,
 1483   // vo == UseNextMarking -> use "next" marking information,
 1484   // vo == UseMarkWord    -> use the mark word in the object header
1485   //
1486   // NOTE: Only the "prev" marking information is guaranteed to be
1487   // consistent most of the time, so most calls to this should use
1488   // vo == UsePrevMarking.
1489   // Currently, there is only one case where this is called with
1490   // vo == UseNextMarking, which is to verify the "next" marking
1491   // information at the end of remark.
1492   // Currently there is only one place where this is called with
1493   // vo == UseMarkWord, which is to verify the marking during a
1494   // full GC.
1495   void verify(bool silent, VerifyOption vo);
1496 
1497   // Override; it uses the "prev" marking information
1498   virtual void verify(bool silent);
1499 
1500   // The methods below are here for convenience and dispatch the
1501   // appropriate method depending on value of the given VerifyOption
1502   // parameter. The values for that parameter, and their meanings,
1503   // are the same as those above.
1504 
1505   bool is_obj_dead_cond(const oop obj,
1506                         const HeapRegion* hr,
1507                         const VerifyOption vo) const;
1508 
1509   bool is_obj_dead_cond(const oop obj,
1510                         const VerifyOption vo) const;
1511 
1512   G1HeapSummary create_g1_heap_summary();
1513   G1EvacSummary create_g1_evac_summary(G1EvacStats* stats);
1514 
1515   // Printing
1516 
1517   virtual void print_on(outputStream* st) const;
1518   virtual void print_extended_on(outputStream* st) const;


 270   // this method will be found dead by the marking cycle).
 271   void allocate_dummy_regions() PRODUCT_RETURN;
 272 
 273   // Clear RSets after a compaction. It also resets the GC time stamps.
 274   void clear_rsets_post_compaction();
 275 
 276   // If the HR printer is active, dump the state of the regions in the
 277   // heap after a compaction.
 278   void print_hrm_post_compaction();
 279 
 280   // Create a memory mapper for auxiliary data structures of the given size and
 281   // translation factor.
 282   static G1RegionToSpaceMapper* create_aux_memory_mapper(const char* description,
 283                                                          size_t size,
 284                                                          size_t translation_factor);
 285 
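The mapper size follows from the translation factor by simple division, if the factor is read as heap bytes covered per byte of the auxiliary structure (an assumption; this file does not define the units). A standalone sketch with an illustrative 512-byte granularity:

  #include <cstddef>

  // Assumption: translation_factor = heap bytes covered per aux byte.
  // Example: a 1 GB heap at 512 bytes per aux byte needs a 2 MB mapping.
  size_t aux_mapper_size(size_t heap_bytes, size_t translation_factor) {
    return heap_bytes / translation_factor;
  }
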
 286   double verify(bool guard, const char* msg);
 287   void verify_before_gc();
 288   void verify_after_gc();
 289 
 290   void log_gc_footer(double pause_time_counter);
 291 
 292   void trace_heap(GCWhen::Type when, const GCTracer* tracer);
 293 
 294   // These are macros so that, if the assert fires, we get the correct
 295   // line number, file, etc.
 296 
 297 #define heap_locking_asserts_params(_extra_message_)                          \
 298   "%s : Heap_lock locked: %s, at safepoint: %s, is VM thread: %s",            \
 299   (_extra_message_),                                                          \
 300   BOOL_TO_STR(Heap_lock->owned_by_self()),                                    \
 301   BOOL_TO_STR(SafepointSynchronize::is_at_safepoint()),                       \
 302   BOOL_TO_STR(Thread::current()->is_VM_thread())
 303 
 304 #define assert_heap_locked()                                                  \
 305   do {                                                                        \
 306     assert(Heap_lock->owned_by_self(),                                        \
 307            heap_locking_asserts_params("should be holding the Heap_lock"));   \
 308   } while (0)
 309 
 310 #define assert_heap_locked_or_at_safepoint(_should_be_vm_thread_)             \
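
heap_locking_asserts_params expands to a printf-style format string followed by the arguments that fill it, so the assert_* macros above can splice it straight into a variadic assert. A minimal standalone sketch of the same pattern, with hypothetical stand-ins for HotSpot's lock state and assert machinery:

  #include <cstdio>
  #include <cstdlib>

  #define BOOL_TO_STR(b) ((b) ? "true" : "false")

  // Hypothetical stand-in for HotSpot's variadic assert.
  #define my_assert(cond, ...)                                  \
    do {                                                        \
      if (!(cond)) { fprintf(stderr, __VA_ARGS__); abort(); }   \
    } while (0)

  // Same shape as heap_locking_asserts_params: a format string plus
  // the arguments that fill it, usable as a macro argument list.
  #define locking_params(extra_message)                         \
    "%s : lock held: %s\n", (extra_message), BOOL_TO_STR(lock_held)

  int main() {
    bool lock_held = true;
    my_assert(lock_held, locking_params("should be holding the lock"));
    return 0;
  }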


 682 
 683   // Insert any required filler objects in the G1 regions around the specified
 684   // ranges to make the regions parseable. This must be called after
 685   // alloc_archive_regions, and after class loading has occurred.
 686   void fill_archive_regions(MemRegion* range, size_t count);
 687 
 688   // For each of the specified MemRegions, uncommit the containing G1 regions
 689   // which had been allocated by alloc_archive_regions. This should be called
 690   // rather than fill_archive_regions at JVM init time if the archive file
 691   // mapping failed, with the same non-overlapping and sorted MemRegion array.
 692   void dealloc_archive_regions(MemRegion* range, size_t count);
 693 
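The two archive-region calls are alternatives gated on whether the archive mapping succeeded, and both take the same sorted, non-overlapping array handed to alloc_archive_regions. A hedged sketch of the init-time flow; map_archive() and load_archived_classes() are invented placeholders, not functions declared in this file:

  // Sketch only: the mapping and class-loading steps are hypothetical.
  void init_archive_heap(G1CollectedHeap* g1h, MemRegion* ranges, size_t count) {
    if (map_archive(ranges, count)) {
      load_archived_classes();                    // class loading comes first
      g1h->fill_archive_regions(ranges, count);   // then make regions parseable
    } else {
      // Mapping failed: give the committed regions back instead.
      g1h->dealloc_archive_regions(ranges, count);
    }
  }
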
 694 protected:
 695 
 696   // Shrink the garbage-first heap by at most the given size (in bytes!).
 697   // (Rounds down to a HeapRegion boundary.)
 698   virtual void shrink(size_t expand_bytes);
 699   void shrink_helper(size_t expand_bytes);
 700 
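The rounding the comment mentions is plain alignment arithmetic (HotSpot's align_size_down helper does the same job). A worked example: with 1 MB regions, a 2.5 MB shrink request rounds down to 2 MB, i.e. two whole regions:

  #include <cstddef>

  // Round a byte count down to a region boundary, as shrink() describes.
  size_t round_down_to_region(size_t bytes, size_t region_bytes) {
    return bytes - (bytes % region_bytes);   // 2.5 MB -> 2 MB for 1 MB regions
  }
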
 701   #if TASKQUEUE_STATS
 702   static void print_taskqueue_stats_hdr(outputStream* const st);
 703   void print_taskqueue_stats() const;
 704   void reset_taskqueue_stats();
 705   #endif // TASKQUEUE_STATS
 706 
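These three members only exist when TASKQUEUE_STATS is non-zero, so call sites must sit behind the same guard; note that after this change print_taskqueue_stats_hdr() still takes the stream explicitly while print_taskqueue_stats() no longer does. A hypothetical call site:

  #if TASKQUEUE_STATS
    // Compiles away entirely when TASKQUEUE_STATS is 0.
    print_taskqueue_stats_hdr(st);   // st: an outputStream* chosen by the caller
    print_taskqueue_stats();
    reset_taskqueue_stats();
  #endif // TASKQUEUE_STATS
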
 707   // Schedule the VM operation that will do an evacuation pause to
 708   // satisfy an allocation request of word_size. *succeeded will
 709   // return whether the VM operation was successful (it did do an
 710   // evacuation pause) or not (another thread beat us to it or the GC
 711   // locker was active). Given that we should not be holding the
 712   // Heap_lock when we enter this method, we will pass the
 713   // gc_count_before (i.e., total_collections()) as a parameter since
 714   // it has to be read while holding the Heap_lock. Currently, both
 715   // methods that call do_collection_pause() release the Heap_lock
 716   // before the call, so it's easy to read gc_count_before just before.
 717   HeapWord* do_collection_pause(size_t         word_size,
 718                                 uint           gc_count_before,
 719                                 bool*          succeeded,
 720                                 GCCause::Cause gc_cause);
 721 
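The protocol in the comment (read total_collections() while holding the Heap_lock, release it, then schedule the pause) looks roughly like this at a call site. A sketch, not the actual allocation path; the wrapper function is hypothetical and ignores that do_collection_pause is protected (the real callers are members of this class):

  // Hypothetical caller illustrating the gc_count_before handshake.
  HeapWord* request_pause(G1CollectedHeap* g1h, size_t word_size) {
    uint gc_count_before;
    {
      MutexLocker ml(Heap_lock);              // count must be read under the lock
      gc_count_before = g1h->total_collections();
    }                                         // Heap_lock released before the pause
    bool succeeded = false;
    HeapWord* result = g1h->do_collection_pause(word_size, gc_count_before,
                                                &succeeded,
                                                GCCause::_g1_inc_collection_pause);
    return succeeded ? result : NULL;
  }
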
 722   void wait_for_root_region_scanning();
 723 
 724   // The guts of the incremental collection pause, executed by the VM
 725   // thread. It returns false if it is unable to do the collection due
 726   // to the GC locker being active, true otherwise.
 727   bool do_collection_pause_at_safepoint(double target_pause_time_ms);
 728 
 729   // Actually do the work of evacuating the collection set.
 730   virtual void evacuate_collection_set(EvacuationInfo& evacuation_info, G1ParScanThreadStateSet* per_thread_states);
 731 
 732   void pre_evacuate_collection_set();
 733   void post_evacuate_collection_set(EvacuationInfo& evacuation_info, G1ParScanThreadStateSet* pss);
 734 
 735   // Print the header for the per-thread termination statistics.
 736   static void print_termination_stats_hdr();
 737   // Print actual per-thread termination statistics.
 738   void print_termination_stats(uint worker_id,
 739                                double elapsed_ms,
 740                                double strong_roots_ms,
 741                                double term_ms,
 742                                size_t term_attempts,
 743                                size_t alloc_buffer_waste,
 744                                size_t undo_waste) const;
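The static header and the per-worker rows pair up in a loop; a hypothetical reporting sketch (WorkerTimes and its fields are invented placeholders for the per-worker measurements, and in the real code this runs inside the class, which can reach these protected members):

  // Print the shared header once, then one row per worker thread.
  print_termination_stats_hdr();
  for (uint worker_id = 0; worker_id < num_workers; worker_id++) {
    const WorkerTimes& t = times[worker_id];   // hypothetical per-worker record
    print_termination_stats(worker_id, t.elapsed_ms, t.strong_roots_ms,
                            t.term_ms, t.term_attempts,
                            t.alloc_buffer_waste, t.undo_waste);
  }
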
 745   // Update object copying statistics.
 746   void record_obj_copy_mem_stats();
 747 
 748   // The g1 remembered set of the heap.
 749   G1RemSet* _g1_rem_set;
 750 
 751   // A set of cards that cover the objects for which the Rsets should be updated
 752   // concurrently after the collection.
 753   DirtyCardQueueSet _dirty_card_queue_set;
 754 
 755   // The closure used to refine a single card.
 756   RefineCardTableEntryClosure* _refine_cte_cl;
 757 
 758   // After a collection pause, make the regions in the CS into free


 952   // Initialize the G1CollectedHeap to have the initial and
 953   // maximum sizes and remembered and barrier sets
 954   // specified by the policy object.
 955   jint initialize();
 956 
 957   virtual void stop();
 958 
 959   // Return the (conservative) maximum heap alignment for any G1 heap
 960   static size_t conservative_max_heap_alignment();
 961 
 962   // Performs operations required after initialization has been done.
 963   void post_initialize();
 964 
 965   // Initialize weak reference processing.
 966   void ref_processing_init();
 967 
 968   virtual Name kind() const {
 969     return CollectedHeap::G1CollectedHeap;
 970   }
 971 
 972   virtual const char* name() const {
 973     return "G1";
 974   }
 975 
 976   const G1CollectorState* collector_state() const { return &_collector_state; }
 977   G1CollectorState* collector_state() { return &_collector_state; }
 978 
 979   // The current policy object for the collector.
 980   G1CollectorPolicy* g1_policy() const { return _g1_policy; }
 981 
 982   virtual CollectorPolicy* collector_policy() const;
 983 
 984   // Adaptive size policy.  No such thing for g1.
 985   virtual AdaptiveSizePolicy* size_policy() { return NULL; }
 986 
 987   // The rem set and barrier set.
 988   G1RemSet* g1_rem_set() const { return _g1_rem_set; }
 989 
 990   unsigned get_gc_time_stamp() {
 991     return _gc_time_stamp;
 992   }
 993 
 994   inline void reset_gc_time_stamp();
 995 


1477   // Verification
1478 
1479   // Perform any cleanup actions necessary before allowing a verification.
1480   virtual void prepare_for_verify();
1481 
1482   // Perform verification.
1483 
 1484   // vo == UsePrevMarking -> use "prev" marking information,
 1485   // vo == UseNextMarking -> use "next" marking information,
 1486   // vo == UseMarkWord    -> use the mark word in the object header
1487   //
1488   // NOTE: Only the "prev" marking information is guaranteed to be
1489   // consistent most of the time, so most calls to this should use
1490   // vo == UsePrevMarking.
1491   // Currently, there is only one case where this is called with
1492   // vo == UseNextMarking, which is to verify the "next" marking
1493   // information at the end of remark.
1494   // Currently there is only one place where this is called with
1495   // vo == UseMarkWord, which is to verify the marking during a
1496   // full GC.
 1497   void verify(VerifyOption vo);
1498 
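Per the NOTE above, UsePrevMarking is the safe default, and the other two options match the two call sites the comment names. A sketch of how the options map to calls, assuming the VerifyOption_G1* enumerators from the shared VerifyOption enum are the full spellings of the shorthand used in the comment:

  // Routine verification: "prev" marking is the only generally consistent choice.
  g1h->verify(VerifyOption_G1UsePrevMarking);

  // End of remark: check the freshly completed "next" marking information.
  g1h->verify(VerifyOption_G1UseNextMarking);

  // Full GC: the marks to check live in the objects' mark words.
  g1h->verify(VerifyOption_G1UseMarkWord);
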
1499   // The methods below are here for convenience and dispatch the
1500   // appropriate method depending on value of the given VerifyOption
1501   // parameter. The values for that parameter, and their meanings,
1502   // are the same as those above.
1503 
1504   bool is_obj_dead_cond(const oop obj,
1505                         const HeapRegion* hr,
1506                         const VerifyOption vo) const;
1507 
1508   bool is_obj_dead_cond(const oop obj,
1509                         const VerifyOption vo) const;
1510 
1511   G1HeapSummary create_g1_heap_summary();
1512   G1EvacSummary create_g1_evac_summary(G1EvacStats* stats);
1513 
1514   // Printing
1515 
1516   virtual void print_on(outputStream* st) const;
1517   virtual void print_extended_on(outputStream* st) const;