src/share/vm/gc/g1/g1CollectedHeap.hpp

 273   // this method will be found dead by the marking cycle).
 274   void allocate_dummy_regions() PRODUCT_RETURN;
 275 
 276   // Clear RSets after a compaction; this also resets the GC time stamps.
 277   void clear_rsets_post_compaction();
 278 
 279   // If the HR printer is active, dump the state of the regions in the
 280   // heap after a compaction.
 281   void print_hrm_post_compaction();
 282 
 283   // Create a memory mapper for auxiliary data structures of the given size and
 284   // translation factor.
 285   static G1RegionToSpaceMapper* create_aux_memory_mapper(const char* description,
 286                                                          size_t size,
 287                                                          size_t translation_factor);
 288 
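
The translation factor above sets how many bytes of heap each byte of the
auxiliary structure covers, so the mapper's size is the heap size divided by
that factor. A minimal standalone sketch of the arithmetic, assuming
illustrative factors (the real factors belong to each aux structure, such as
the card table or the mark bitmaps):

  // Standalone sketch, not the HotSpot implementation: with a translation
  // factor of N, one byte of auxiliary data covers N bytes of heap, so the
  // mapper is sized as heap_bytes / N, rounded up.
  #include <cstddef>
  #include <cstdio>

  static size_t aux_size(size_t heap_bytes, size_t translation_factor) {
    return (heap_bytes + translation_factor - 1) / translation_factor;
  }

  int main() {
    const size_t heap = 1024UL * 1024 * 1024;                // assume a 1 GB heap
    printf("card table:  %zu bytes\n", aux_size(heap, 512)); // 1 byte per 512 heap bytes
    printf("mark bitmap: %zu bytes\n", aux_size(heap, 64));  // 1 bit per 8-byte heap word
    return 0;
  }
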
 289   double verify(bool guard, const char* msg);
 290   void verify_before_gc();
 291   void verify_after_gc();
 292 
 293   void log_gc_header();
 294   void log_gc_footer(double pause_time_sec);
 295 
 296   void trace_heap(GCWhen::Type when, const GCTracer* tracer);
 297 
 298   void process_weak_jni_handles();
 299 
 300   // These are macros so that, if the assert fires, we get the correct
 301   // line number, file, etc.
 302 
 303 #define heap_locking_asserts_params(_extra_message_)                          \
 304   "%s : Heap_lock locked: %s, at safepoint: %s, is VM thread: %s",            \
 305   (_extra_message_),                                                          \
 306   BOOL_TO_STR(Heap_lock->owned_by_self()),                                    \
 307   BOOL_TO_STR(SafepointSynchronize::is_at_safepoint()),                       \
 308   BOOL_TO_STR(Thread::current()->is_VM_thread())
 309 
 310 #define assert_heap_locked()                                                  \
 311   do {                                                                        \
 312     assert(Heap_lock->owned_by_self(),                                        \
 313            heap_locking_asserts_params("should be holding the Heap_lock"));   \
 314   } while (0)
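
The reason these are macros rather than helper functions, as the comment
notes, is that assert() captures __FILE__ and __LINE__ where it expands. A
minimal standalone illustration of the difference; the names are hypothetical
stand-ins, not HotSpot's debug machinery:

  #include <cassert>

  static bool heap_locked_by_self() { return true; } // stand-in for Heap_lock->owned_by_self()

  // Inside a helper function, a failing assert reports the helper's own
  // file and line, which tells the caller nothing useful:
  static void assert_heap_locked_as_function() {
    assert(heap_locked_by_self() && "should be holding the Heap_lock");
  }

  // Expanded as a macro, the same assert fires with the *call site's*
  // file and line number:
  #define ASSERT_HEAP_LOCKED()                                            \
    do {                                                                  \
      assert(heap_locked_by_self() && "should be holding the Heap_lock"); \
    } while (0)
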


 684 
 685   // Insert any required filler objects in the G1 regions around the specified
 686   // ranges to make the regions parseable. This must be called after
 687   // alloc_archive_regions, and after class loading has occurred.
 688   void fill_archive_regions(MemRegion* range, size_t count);
 689 
 690   // For each of the specified MemRegions, uncommit the containing G1 regions
 691   // which had been allocated by alloc_archive_regions. This should be called
 692   // rather than fill_archive_regions at JVM init time if the archive file
 693   // mapping failed, with the same non-overlapping and sorted MemRegion array.
 694   void dealloc_archive_regions(MemRegion* range, size_t count);
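
The two comments above define an ordering protocol at JVM init time. A hedged
sketch of a caller honoring it; map_archive() and load_archived_classes() are
hypothetical placeholders, and only the call order is taken from the comments:

  bool map_archive(MemRegion* ranges, size_t count);  // hypothetical mapping step
  void load_archived_classes();                       // hypothetical class-loading step

  void init_archive_heap(G1CollectedHeap* g1h, MemRegion* ranges, size_t count) {
    // 'ranges' must be the same sorted, non-overlapping array that was
    // previously passed to alloc_archive_regions().
    if (map_archive(ranges, count)) {
      load_archived_classes();                       // must precede the fill
      g1h->fill_archive_regions(ranges, count);      // make the regions parseable
    } else {
      g1h->dealloc_archive_regions(ranges, count);   // mapping failed: uncommit
    }
  }
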
 695 
 696 protected:
 697 
 698   // Shrink the garbage-first heap by at most the given size (in bytes!).
 699   // (Rounds down to a HeapRegion boundary.)
 700   virtual void shrink(size_t expand_bytes);
 701   void shrink_helper(size_t expand_bytes);
 702 
 703   #if TASKQUEUE_STATS
 704   static void print_taskqueue_stats_hdr(outputStream* const st = gclog_or_tty);
 705   void print_taskqueue_stats(outputStream* const st = gclog_or_tty) const;
 706   void reset_taskqueue_stats();
 707   #endif // TASKQUEUE_STATS
 708 
 709   // Schedule the VM operation that will do an evacuation pause to
 710   // satisfy an allocation request of word_size. *succeeded will
 711   // return whether the VM operation was successful (it did do an
 712   // evacuation pause) or not (another thread beat us to it or the GC
 713   // locker was active). Given that we should not be holding the
 714   // Heap_lock when we enter this method, we will pass the
 715   // gc_count_before (i.e., total_collections()) as a parameter since
 716   // it has to be read while holding the Heap_lock. Currently, both
 717   // methods that call do_collection_pause() release the Heap_lock
 718   // before the call, so it's easy to read gc_count_before just before.
 719   HeapWord* do_collection_pause(size_t         word_size,
 720                                 uint           gc_count_before,
 721                                 bool*          succeeded,
 722                                 GCCause::Cause gc_cause);
 723 
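
A hedged sketch of the caller protocol the comment describes; access
modifiers and the retry loop are ignored for illustration, and only the
lock/count ordering comes from the comment:

  HeapWord* allocate_with_pause(G1CollectedHeap* g1h, size_t word_size) {
    uint gc_count_before;
    {
      MutexLocker ml(Heap_lock);                    // count must be read under Heap_lock
      gc_count_before = g1h->total_collections();
    }                                               // lock released before the pause
    bool succeeded = false;
    HeapWord* result = g1h->do_collection_pause(word_size, gc_count_before,
                                                &succeeded,
                                                GCCause::_g1_inc_collection_pause);
    // !succeeded: another thread beat us to it or the GC locker was active;
    // real callers retry or fall back from here.
    return succeeded ? result : NULL;
  }
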
 724   void wait_for_root_region_scanning();
 725 
 726   // The guts of the incremental collection pause, executed by the VM
 727   // thread. It returns false if it is unable to do the collection because
 728   // the GC locker is active, true otherwise.
 729   bool do_collection_pause_at_safepoint(double target_pause_time_ms);
 730 
 731   // Actually do the work of evacuating the collection set.
 732   virtual void evacuate_collection_set(EvacuationInfo& evacuation_info, G1ParScanThreadStateSet* per_thread_states);
 733 
 734   void pre_evacuate_collection_set();
 735   void post_evacuate_collection_set(EvacuationInfo& evacuation_info, G1ParScanThreadStateSet* pss);
 736 
 737   // Print the header for the per-thread termination statistics.
 738   static void print_termination_stats_hdr(outputStream* const st);
 739   // Print actual per-thread termination statistics.
 740   void print_termination_stats(outputStream* const st,
 741                                uint worker_id,
 742                                double elapsed_ms,
 743                                double strong_roots_ms,
 744                                double term_ms,
 745                                size_t term_attempts,
 746                                size_t alloc_buffer_waste,
 747                                size_t undo_waste) const;
 748   // Update object copying statistics.
 749   void record_obj_copy_mem_stats();
 750 
 751   // The g1 remembered set of the heap.
 752   G1RemSet* _g1_rem_set;
 753 
 754   // A set of cards that cover the objects for which the RSets should be updated
 755   // concurrently after the collection.
 756   DirtyCardQueueSet _dirty_card_queue_set;
 757 
 758   // The closure used to refine a single card.
 759   RefineCardTableEntryClosure* _refine_cte_cl;
 760 
 761   // After a collection pause, make the regions in the CS into free


 948   // Initialize the G1CollectedHeap to have the initial and
 949   // maximum sizes and remembered and barrier sets
 950   // specified by the policy object.
 951   jint initialize();
 952 
 953   virtual void stop();
 954 
 955   // Return the (conservative) maximum heap alignment for any G1 heap
 956   static size_t conservative_max_heap_alignment();
 957 
 958   // Performs operations required after initialization has completed.
 959   void post_initialize();
 960 
 961   // Initialize weak reference processing.
 962   void ref_processing_init();
 963 
 964   virtual Name kind() const {
 965     return CollectedHeap::G1CollectedHeap;
 966   }
 967 
 968   const G1CollectorState* collector_state() const { return &_collector_state; }
 969   G1CollectorState* collector_state() { return &_collector_state; }
 970 
 971   // The current policy object for the collector.
 972   G1CollectorPolicy* g1_policy() const { return _g1_policy; }
 973 
 974   virtual CollectorPolicy* collector_policy() const;
 975 
 976   // Adaptive size policy.  No such thing for G1.
 977   virtual AdaptiveSizePolicy* size_policy() { return NULL; }
 978 
 979   // The rem set and barrier set.
 980   G1RemSet* g1_rem_set() const { return _g1_rem_set; }
 981 
 982   unsigned get_gc_time_stamp() {
 983     return _gc_time_stamp;
 984   }
 985 
 986   inline void reset_gc_time_stamp();
 987 


1345 
1346   // Returns the number of regions a humongous object of the given word size
1347   // requires.
1348   static size_t humongous_obj_size_in_regions(size_t word_size);
1349 
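
A hedged sketch of the computation this declaration names, not HotSpot's
implementation; the region size is a hypothetical fixed value chosen only for
illustration:

  #include <cstddef>

  static const size_t region_size_words = (1024 * 1024) / 8;  // assume 1 MB regions, 8-byte words

  static size_t humongous_obj_size_in_regions(size_t word_size) {
    // A humongous object occupies enough whole regions to hold word_size words.
    return (word_size + region_size_words - 1) / region_size_words;
  }
  // e.g. with 1 MB regions, a 2.5 MB object (327680 words) needs 3 regions.
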
1350   // Returns the maximum heap capacity.
1351   virtual size_t max_capacity() const;
1352 
1353   virtual jlong millis_since_last_gc();
1354 
1355 
1356   // Convenience function to be used in situations where the heap type can be
1357   // asserted to be this type.
1358   static G1CollectedHeap* heap();
1359 
1360   void set_region_short_lived_locked(HeapRegion* hr);
1361   // add appropriate methods for any other surv rate groups
1362 
1363   YoungList* young_list() const { return _young_list; }
1364 
1365   // debugging
1366   bool check_young_list_well_formed() {
1367     return _young_list->check_list_well_formed();
1368   }
1369 
1370   bool check_young_list_empty(bool check_heap,
1371                               bool check_sample = true);
1372 
1373   // *** Stuff related to concurrent marking.  It's not clear to me that so
1374   // many of these need to be public.
1375 
1376   // The functions below are helper functions that a subclass of
1377   // "CollectedHeap" can use in the implementation of its virtual
1378   // functions.
1379   // This performs a concurrent marking of the live objects in a
1380   // bitmap off to the side.
1381   void doConcurrentMark();
1382 
1383   bool isMarkedPrev(oop obj) const;
1384   bool isMarkedNext(oop obj) const;
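
The "bitmap off to the side" mentioned above is the data structure behind
isMarkedPrev()/isMarkedNext(). A minimal standalone sketch of such a side
bitmap; the layout here is hypothetical, not G1's actual CMBitMap:

  #include <cstddef>
  #include <cstdint>
  #include <vector>

  class SideMarkBitmap {
    uintptr_t             _heap_base;
    std::vector<uint64_t> _bits;                  // one bit per 8-byte heap word
  public:
    SideMarkBitmap(uintptr_t heap_base, size_t heap_words)
      : _heap_base(heap_base), _bits((heap_words + 63) / 64, 0) {}

    void mark(uintptr_t addr) {                   // record a live object
      size_t bit = (addr - _heap_base) >> 3;      // word index from heap base
      _bits[bit >> 6] |= uint64_t(1) << (bit & 63);
    }
    bool is_marked(uintptr_t addr) const {        // what isMarkedPrev/Next consult
      size_t bit = (addr - _heap_base) >> 3;
      return ((_bits[bit >> 6] >> (bit & 63)) & 1) != 0;
    }
  };
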


1462   // Verification
1463 
1464   // Perform any cleanup actions necessary before allowing a verification.
1465   virtual void prepare_for_verify();
1466 
1467   // Perform verification.
1468 
 1469   // vo == UsePrevMarking -> use "prev" marking information,
 1470   // vo == UseNextMarking -> use "next" marking information,
 1471   // vo == UseMarkWord    -> use the mark word in the object header.
1472   //
1473   // NOTE: Only the "prev" marking information is guaranteed to be
1474   // consistent most of the time, so most calls to this should use
1475   // vo == UsePrevMarking.
1476   // Currently, there is only one case where this is called with
1477   // vo == UseNextMarking, which is to verify the "next" marking
1478   // information at the end of remark.
1479   // Currently there is only one place where this is called with
1480   // vo == UseMarkWord, which is to verify the marking during a
1481   // full GC.
1482   void verify(bool silent, VerifyOption vo);
1483 
1484   // Override; it uses the "prev" marking information
1485   virtual void verify(bool silent);
1486 
1487   // The methods below are here for convenience and dispatch the
1488   // appropriate method depending on value of the given VerifyOption
1489   // parameter. The values for that parameter, and their meanings,
1490   // are the same as those above.
1491 
1492   bool is_obj_dead_cond(const oop obj,
1493                         const HeapRegion* hr,
1494                         const VerifyOption vo) const;
1495 
1496   bool is_obj_dead_cond(const oop obj,
1497                         const VerifyOption vo) const;
1498 
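
A hedged sketch of the dispatch described above, written as it would sit in
the class body; the per-option calls on the right are illustrative stand-ins
for G1's liveness tests, not the actual method bodies:

  bool is_obj_dead_cond(const oop obj, const HeapRegion* hr,
                        const VerifyOption vo) const {
    switch (vo) {
      case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj, hr); // "prev" information
      case VerifyOption_G1UseNextMarking: return is_obj_ill(obj, hr);  // "next" information
      case VerifyOption_G1UseMarkWord:    return !obj->is_gc_marked(); // header mark word
      default: ShouldNotReachHere(); return false;
    }
  }
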
1499   G1HeapSummary create_g1_heap_summary();
1500   G1EvacSummary create_g1_evac_summary(G1EvacStats* stats);
1501 
1502   // Printing
1503 
1504   virtual void print_on(outputStream* st) const;
1505   virtual void print_extended_on(outputStream* st) const;




 273   // this method will be found dead by the marking cycle).
 274   void allocate_dummy_regions() PRODUCT_RETURN;
 275 
 276   // Clear RSets after a compaction; this also resets the GC time stamps.
 277   void clear_rsets_post_compaction();
 278 
 279   // If the HR printer is active, dump the state of the regions in the
 280   // heap after a compaction.
 281   void print_hrm_post_compaction();
 282 
 283   // Create a memory mapper for auxiliary data structures of the given size and
 284   // translation factor.
 285   static G1RegionToSpaceMapper* create_aux_memory_mapper(const char* description,
 286                                                          size_t size,
 287                                                          size_t translation_factor);
 288 
 289   double verify(bool guard, const char* msg);
 290   void verify_before_gc();
 291   void verify_after_gc();
 292 
 293   void log_gc_footer(double pause_time_counter);
 294 
 295   void trace_heap(GCWhen::Type when, const GCTracer* tracer);
 296 
 297   void process_weak_jni_handles();
 298 
 299   // These are macros so that, if the assert fires, we get the correct
 300   // line number, file, etc.
 301 
 302 #define heap_locking_asserts_params(_extra_message_)                          \
 303   "%s : Heap_lock locked: %s, at safepoint: %s, is VM thread: %s",            \
 304   (_extra_message_),                                                          \
 305   BOOL_TO_STR(Heap_lock->owned_by_self()),                                    \
 306   BOOL_TO_STR(SafepointSynchronize::is_at_safepoint()),                       \
 307   BOOL_TO_STR(Thread::current()->is_VM_thread())
 308 
 309 #define assert_heap_locked()                                                  \
 310   do {                                                                        \
 311     assert(Heap_lock->owned_by_self(),                                        \
 312            heap_locking_asserts_params("should be holding the Heap_lock"));   \
 313   } while (0)


 683 
 684   // Insert any required filler objects in the G1 regions around the specified
 685   // ranges to make the regions parseable. This must be called after
 686   // alloc_archive_regions, and after class loading has occurred.
 687   void fill_archive_regions(MemRegion* range, size_t count);
 688 
 689   // For each of the specified MemRegions, uncommit the containing G1 regions
 690   // which had been allocated by alloc_archive_regions. This should be called
 691   // rather than fill_archive_regions at JVM init time if the archive file
 692   // mapping failed, with the same non-overlapping and sorted MemRegion array.
 693   void dealloc_archive_regions(MemRegion* range, size_t count);
 694 
 695 protected:
 696 
 697   // Shrink the garbage-first heap by at most the given size (in bytes!).
 698   // (Rounds down to a HeapRegion boundary.)
 699   virtual void shrink(size_t expand_bytes);
 700   void shrink_helper(size_t expand_bytes);
 701 
 702   #if TASKQUEUE_STATS
 703   static void print_taskqueue_stats_hdr(outputStream* const st);
 704   void print_taskqueue_stats() const;
 705   void reset_taskqueue_stats();
 706   #endif // TASKQUEUE_STATS
 707 
 708   // Schedule the VM operation that will do an evacuation pause to
 709   // satisfy an allocation request of word_size. *succeeded will
 710   // return whether the VM operation was successful (it did do an
 711   // evacuation pause) or not (another thread beat us to it or the GC
 712   // locker was active). Given that we should not be holding the
 713   // Heap_lock when we enter this method, we will pass the
 714   // gc_count_before (i.e., total_collections()) as a parameter since
 715   // it has to be read while holding the Heap_lock. Currently, both
 716   // methods that call do_collection_pause() release the Heap_lock
 717   // before the call, so it's easy to read gc_count_before just before.
 718   HeapWord* do_collection_pause(size_t         word_size,
 719                                 uint           gc_count_before,
 720                                 bool*          succeeded,
 721                                 GCCause::Cause gc_cause);
 722 
 723   void wait_for_root_region_scanning();
 724 
 725   // The guts of the incremental collection pause, executed by the VM
 726   // thread. It returns false if it is unable to do the collection because
 727   // the GC locker is active, true otherwise.
 728   bool do_collection_pause_at_safepoint(double target_pause_time_ms);
 729 
 730   // Actually do the work of evacuating the collection set.
 731   virtual void evacuate_collection_set(EvacuationInfo& evacuation_info, G1ParScanThreadStateSet* per_thread_states);
 732 
 733   void pre_evacuate_collection_set();
 734   void post_evacuate_collection_set(EvacuationInfo& evacuation_info, G1ParScanThreadStateSet* pss);
 735 
 736   // Print the header for the per-thread termination statistics.
 737   static void print_termination_stats_hdr();
 738   // Print actual per-thread termination statistics.
 739   void print_termination_stats(uint worker_id,
 740                                double elapsed_ms,
 741                                double strong_roots_ms,
 742                                double term_ms,
 743                                size_t term_attempts,
 744                                size_t alloc_buffer_waste,
 745                                size_t undo_waste) const;
 746   // Update object copying statistics.
 747   void record_obj_copy_mem_stats();
 748 
 749   // The g1 remembered set of the heap.
 750   G1RemSet* _g1_rem_set;
 751 
 752   // A set of cards that cover the objects for which the RSets should be updated
 753   // concurrently after the collection.
 754   DirtyCardQueueSet _dirty_card_queue_set;
 755 
 756   // The closure used to refine a single card.
 757   RefineCardTableEntryClosure* _refine_cte_cl;
 758 
 759   // After a collection pause, make the regions in the CS into free


 946   // Initialize the G1CollectedHeap to have the initial and
 947   // maximum sizes and remembered and barrier sets
 948   // specified by the policy object.
 949   jint initialize();
 950 
 951   virtual void stop();
 952 
 953   // Return the (conservative) maximum heap alignment for any G1 heap
 954   static size_t conservative_max_heap_alignment();
 955 
 956   // Performs operations required after initialization has completed.
 957   void post_initialize();
 958 
 959   // Initialize weak reference processing.
 960   void ref_processing_init();
 961 
 962   virtual Name kind() const {
 963     return CollectedHeap::G1CollectedHeap;
 964   }
 965 
 966   virtual const char* name() const {
 967     return "G1";
 968   }
 969 
 970   const G1CollectorState* collector_state() const { return &_collector_state; }
 971   G1CollectorState* collector_state() { return &_collector_state; }
 972 
 973   // The current policy object for the collector.
 974   G1CollectorPolicy* g1_policy() const { return _g1_policy; }
 975 
 976   virtual CollectorPolicy* collector_policy() const;
 977 
 978   // Adaptive size policy.  No such thing for g1.
 979   virtual AdaptiveSizePolicy* size_policy() { return NULL; }
 980 
 981   // The rem set and barrier set.
 982   G1RemSet* g1_rem_set() const { return _g1_rem_set; }
 983 
 984   unsigned get_gc_time_stamp() {
 985     return _gc_time_stamp;
 986   }
 987 
 988   inline void reset_gc_time_stamp();
 989 


1347 
1348   // Returns the number of regions a humongous object of the given word size
1349   // requires.
1350   static size_t humongous_obj_size_in_regions(size_t word_size);
1351 
1352   // Returns the maximum heap capacity.
1353   virtual size_t max_capacity() const;
1354 
1355   virtual jlong millis_since_last_gc();
1356 
1357 
1358   // Convenience function to be used in situations where the heap type can be
1359   // asserted to be this type.
1360   static G1CollectedHeap* heap();
1361 
1362   void set_region_short_lived_locked(HeapRegion* hr);
1363   // add appropriate methods for any other surv rate groups
1364 
1365   YoungList* young_list() const { return _young_list; }
1366 
1367   uint old_regions_count() const { return _old_set.length(); }
1368 
1369   uint humongous_regions_count() const { return _humongous_set.length(); }
1370 
1371   // debugging
1372   bool check_young_list_well_formed() {
1373     return _young_list->check_list_well_formed();
1374   }
1375 
1376   bool check_young_list_empty(bool check_heap,
1377                               bool check_sample = true);
1378 
1379   // *** Stuff related to concurrent marking.  It's not clear to me that so
1380   // many of these need to be public.
1381 
1382   // The functions below are helper functions that a subclass of
1383   // "CollectedHeap" can use in the implementation of its virtual
1384   // functions.
1385   // This performs a concurrent marking of the live objects in a
1386   // bitmap off to the side.
1387   void doConcurrentMark();
1388 
1389   bool isMarkedPrev(oop obj) const;
1390   bool isMarkedNext(oop obj) const;


1468   // Verification
1469 
1470   // Perform any cleanup actions necessary before allowing a verification.
1471   virtual void prepare_for_verify();
1472 
1473   // Perform verification.
1474 
 1475   // vo == UsePrevMarking -> use "prev" marking information,
 1476   // vo == UseNextMarking -> use "next" marking information,
 1477   // vo == UseMarkWord    -> use the mark word in the object header.
1478   //
1479   // NOTE: Only the "prev" marking information is guaranteed to be
1480   // consistent most of the time, so most calls to this should use
1481   // vo == UsePrevMarking.
1482   // Currently, there is only one case where this is called with
1483   // vo == UseNextMarking, which is to verify the "next" marking
1484   // information at the end of remark.
1485   // Currently there is only one place where this is called with
1486   // vo == UseMarkWord, which is to verify the marking during a
1487   // full GC.
1488   void verify(VerifyOption vo);
1489 
1490   // The methods below are here for convenience and dispatch the
1491   // appropriate method depending on value of the given VerifyOption
1492   // parameter. The values for that parameter, and their meanings,
1493   // are the same as those above.
1494 
1495   bool is_obj_dead_cond(const oop obj,
1496                         const HeapRegion* hr,
1497                         const VerifyOption vo) const;
1498 
1499   bool is_obj_dead_cond(const oop obj,
1500                         const VerifyOption vo) const;
1501 
1502   G1HeapSummary create_g1_heap_summary();
1503   G1EvacSummary create_g1_evac_summary(G1EvacStats* stats);
1504 
1505   // Printing
1506 
1507   virtual void print_on(outputStream* st) const;
1508   virtual void print_extended_on(outputStream* st) const;

