
src/hotspot/share/gc/g1/g1CollectedHeap.hpp

rev 54086 : imported patch 8219100-cleanup-young-collection-prologue
rev 54087 : imported patch 8218668-reorganize-collection-set


 126  private:
 127   void reset_from_card_cache(uint start_idx, size_t num_regions);
 128  public:
 129   virtual void on_commit(uint start_idx, size_t num_regions, bool zero_filled);
 130 };
 131 
 132 class G1CollectedHeap : public CollectedHeap {
 133   friend class G1FreeCollectionSetTask;
 134   friend class VM_CollectForMetadataAllocation;
 135   friend class VM_G1CollectForAllocation;
 136   friend class VM_G1CollectFull;
 137   friend class VMStructs;
 138   friend class MutatorAllocRegion;
 139   friend class G1FullCollector;
 140   friend class G1GCAllocRegion;
 141   friend class G1HeapVerifier;
 142 
 143   // Closures used in implementation.
 144   friend class G1ParScanThreadState;
 145   friend class G1ParScanThreadStateSet;
 146   friend class G1ParTask;
 147   friend class G1PLABAllocator;
 148   friend class G1PrepareCompactClosure;
 149 
 150   // Other related classes.
 151   friend class HeapRegionClaimer;
 152 
 153   // Testing classes.
 154   friend class G1CheckCSetFastTableClosure;
 155 
 156 private:
 157   G1YoungRemSetSamplingThread* _young_gen_sampling_thread;
 158 
 159   WorkGang* _workers;
 160   G1CollectorPolicy* _collector_policy;
 161   G1CardTable* _card_table;
 162 
 163   SoftRefPolicy      _soft_ref_policy;
 164 
 165   static size_t _humongous_object_threshold_in_words;
 166 
 167   // These sets keep track of old, archive and humongous regions respectively.
 168   HeapRegionSet _old_set;


 502   // successful, perform the allocation and return the address of the
 503   // allocated block, or else "NULL".
 504   HeapWord* expand_and_allocate(size_t word_size);
 505 
 506   // Process any reference objects discovered.
 507   void process_discovered_references(G1ParScanThreadStateSet* per_thread_states);
 508 
 509   // During an initial mark pause we may install a pending list head which is
 510   // not otherwise reachable; ensure that it is marked in the bitmap for
 511   // concurrent marking to discover.
 512   void make_pending_list_reachable();
 513 
 514   // Merges the information gathered on a per-thread basis for all worker threads
 515   // during GC into global variables.
 516   void merge_per_thread_state_info(G1ParScanThreadStateSet* per_thread_states);
 517 public:
 518   G1YoungRemSetSamplingThread* sampling_thread() const { return _young_gen_sampling_thread; }
 519 
 520   WorkGang* workers() const { return _workers; }
 521 


 522   G1Allocator* allocator() {
 523     return _allocator;
 524   }
 525 
 526   G1HeapVerifier* verifier() {
 527     return _verifier;
 528   }
 529 
 530   G1MonitoringSupport* g1mm() {
 531     assert(_g1mm != NULL, "should have been initialized");
 532     return _g1mm;
 533   }
 534 
 535   void resize_heap_if_necessary();
 536 
 537   // Expand the garbage-first heap by at least the given size (in bytes!).
 538   // Returns true if the heap was expanded by the requested amount;
 539   // false otherwise.
 540   // (Rounds up to a HeapRegion boundary.)
 541   bool expand(size_t expand_bytes, WorkGang* pretouch_workers = NULL, double* expand_time_ms = NULL);


 721   // methods that call do_collection_pause() release the Heap_lock
 722   // before the call, so it's easy to read gc_count_before just before.
 723   HeapWord* do_collection_pause(size_t         word_size,
 724                                 uint           gc_count_before,
 725                                 bool*          succeeded,
 726                                 GCCause::Cause gc_cause);
 727 
 728   void wait_for_root_region_scanning();
 729 
 730   // The guts of the incremental collection pause, executed by the VM
 731   // thread. It returns false if it is unable to do the collection due
 732   // to the GC locker being active, true otherwise.
 733   bool do_collection_pause_at_safepoint(double target_pause_time_ms);
 734 
 735   G1HeapVerifier::G1VerifyType young_collection_verify_type() const;
 736   void verify_before_young_collection(G1HeapVerifier::G1VerifyType type);
 737   void verify_after_young_collection(G1HeapVerifier::G1VerifyType type);
 738 
 739   void calculate_collection_set(G1EvacuationInfo& evacuation_info, double target_pause_time_ms);
 740 
 741   // Actually do the work of evacuating the collection set.
 742   void evacuate_collection_set(G1ParScanThreadStateSet* per_thread_states);
 743   void evacuate_optional_collection_set(G1ParScanThreadStateSet* per_thread_states);
 744   void evacuate_optional_regions(G1ParScanThreadStateSet* per_thread_states, G1OptionalCSet* ocset);

 745 
 746   void pre_evacuate_collection_set(G1EvacuationInfo& evacuation_info);
 747   void post_evacuate_collection_set(G1EvacuationInfo& evacuation_info, G1ParScanThreadStateSet* pss);
 748 
 749   void expand_heap_after_young_collection();
 750   // Update object copying statistics.
 751   void record_obj_copy_mem_stats();
 752 
 753   // The hot card cache for remembered set insertion optimization.
 754   G1HotCardCache* _hot_card_cache;
 755 
 756   // The g1 remembered set of the heap.
 757   G1RemSet* _rem_set;
 758 
 759   // A set of cards that cover the objects for which the RSets should be updated
 760   // concurrently after the collection.
 761   G1DirtyCardQueueSet _dirty_card_queue_set;
 762 
 763   // After a collection pause, convert the regions in the collection set into free
 764   // regions.


1407   virtual void print_gc_threads_on(outputStream* st) const;
1408   virtual void gc_threads_do(ThreadClosure* tc) const;
1409 
1410   // Override
1411   void print_tracing_info() const;
1412 
1413   // The following two methods are helpful for debugging RSet issues.
1414   void print_cset_rsets() PRODUCT_RETURN;
1415   void print_all_rsets() PRODUCT_RETURN;
1416 
1417   size_t pending_card_num();
1418 };
1419 
1420 class G1ParEvacuateFollowersClosure : public VoidClosure {
1421 private:
1422   double _start_term;
1423   double _term_time;
1424   size_t _term_attempts;
1425 
1426   void start_term_time() { _term_attempts++; _start_term = os::elapsedTime(); }
1427   void end_term_time() { _term_time += os::elapsedTime() - _start_term; }
1428 protected:
1429   G1CollectedHeap*              _g1h;
1430   G1ParScanThreadState*         _par_scan_state;
1431   RefToScanQueueSet*            _queues;
1432   ParallelTaskTerminator*       _terminator;
1433   G1GCPhaseTimes::GCParPhases   _phase;
1434 
1435   G1ParScanThreadState*   par_scan_state() { return _par_scan_state; }
1436   RefToScanQueueSet*      queues()         { return _queues; }
1437   ParallelTaskTerminator* terminator()     { return _terminator; }
1438 
1439 public:
1440   G1ParEvacuateFollowersClosure(G1CollectedHeap* g1h,
1441                                 G1ParScanThreadState* par_scan_state,
1442                                 RefToScanQueueSet* queues,
1443                                 ParallelTaskTerminator* terminator,
1444                                 G1GCPhaseTimes::GCParPhases phase)
1445     : _start_term(0.0), _term_time(0.0), _term_attempts(0),
1446       _g1h(g1h), _par_scan_state(par_scan_state),
1447       _queues(queues), _terminator(terminator), _phase(phase) {}


 126  private:
 127   void reset_from_card_cache(uint start_idx, size_t num_regions);
 128  public:
 129   virtual void on_commit(uint start_idx, size_t num_regions, bool zero_filled);
 130 };
 131 
 132 class G1CollectedHeap : public CollectedHeap {
 133   friend class G1FreeCollectionSetTask;
 134   friend class VM_CollectForMetadataAllocation;
 135   friend class VM_G1CollectForAllocation;
 136   friend class VM_G1CollectFull;
 137   friend class VMStructs;
 138   friend class MutatorAllocRegion;
 139   friend class G1FullCollector;
 140   friend class G1GCAllocRegion;
 141   friend class G1HeapVerifier;
 142 
 143   // Closures used in implementation.
 144   friend class G1ParScanThreadState;
 145   friend class G1ParScanThreadStateSet;
 146   friend class G1EvacuateRegionsTask;
 147   friend class G1PLABAllocator;

 148 
 149   // Other related classes.
 150   friend class HeapRegionClaimer;
 151 
 152   // Testing classes.
 153   friend class G1CheckCSetFastTableClosure;
 154 
 155 private:
 156   G1YoungRemSetSamplingThread* _young_gen_sampling_thread;
 157 
 158   WorkGang* _workers;
 159   G1CollectorPolicy* _collector_policy;
 160   G1CardTable* _card_table;
 161 
 162   SoftRefPolicy      _soft_ref_policy;
 163 
 164   static size_t _humongous_object_threshold_in_words;
 165 
 166   // These sets keep track of old, archive and humongous regions respectively.
 167   HeapRegionSet _old_set;


 501   // successful, perform the allocation and return the address of the
 502   // allocated block, or else "NULL".
 503   HeapWord* expand_and_allocate(size_t word_size);
 504 
 505   // Process any reference objects discovered.
 506   void process_discovered_references(G1ParScanThreadStateSet* per_thread_states);
 507 
 508   // During an initial mark pause we may install a pending list head which is
 509   // not otherwise reachable; ensure that it is marked in the bitmap for
 510   // concurrent marking to discover.
 511   void make_pending_list_reachable();
 512 
 513   // Merges the information gathered on a per-thread basis for all worker threads
 514   // during GC into global variables.
 515   void merge_per_thread_state_info(G1ParScanThreadStateSet* per_thread_states);
 516 public:
 517   G1YoungRemSetSamplingThread* sampling_thread() const { return _young_gen_sampling_thread; }
 518 
 519   WorkGang* workers() const { return _workers; }
 520 
 521   Tickspan run_task(AbstractGangTask* task, uint num_workers = 0);
 522 
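Reviewer note: run_task() is the new entry point introduced here for dispatching a gang task on the heap's WorkGang and timing it. Below is a minimal hedged sketch of a caller; the task class and log message are hypothetical, while AbstractGangTask, Tickspan, and log_debug are existing HotSpot facilities.

  // Hypothetical task, for illustration only.
  class ExampleScrubTask : public AbstractGangTask {
  public:
    ExampleScrubTask() : AbstractGangTask("Example Scrub") {}
    virtual void work(uint worker_id) {
      // Each worker processes its share, selected by worker_id.
    }
  };

  void example_caller(G1CollectedHeap* g1h) {
    ExampleScrubTask task;
    // num_workers == 0 presumably means "use the gang's active workers".
    Tickspan elapsed = g1h->run_task(&task);
    log_debug(gc)("Example scrub took %.3fms", elapsed.seconds() * 1000.0);
  }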
 523   G1Allocator* allocator() {
 524     return _allocator;
 525   }
 526 
 527   G1HeapVerifier* verifier() {
 528     return _verifier;
 529   }
 530 
 531   G1MonitoringSupport* g1mm() {
 532     assert(_g1mm != NULL, "should have been initialized");
 533     return _g1mm;
 534   }
 535 
 536   void resize_heap_if_necessary();
 537 
 538   // Expand the garbage-first heap by at least the given size (in bytes!).
 539   // Returns true if the heap was expanded by the requested amount;
 540   // false otherwise.
 541   // (Rounds up to a HeapRegion boundary.)
 542   bool expand(size_t expand_bytes, WorkGang* pretouch_workers = NULL, double* expand_time_ms = NULL);
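Reviewer note: per the comment above, an expansion request is satisfied in whole HeapRegion units. A hedged sketch of the resulting caller contract, using the existing align_up() and HeapRegion::GrainBytes helpers (the surrounding variables are illustrative):

  size_t request = 3 * HeapRegion::GrainBytes / 2;             // 1.5 regions
  size_t granted = align_up(request, HeapRegion::GrainBytes);  // rounds up to 2 regions
  double expand_ms;
  if (g1h->expand(request, NULL /* pretouch_workers */, &expand_ms)) {
    // On success, at least `granted` bytes (not just `request`) were committed,
    // and expand_ms holds the time spent expanding.
  }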


 722   // methods that call do_collection_pause() release the Heap_lock
 723   // before the call, so it's easy to read gc_count_before just before.
 724   HeapWord* do_collection_pause(size_t         word_size,
 725                                 uint           gc_count_before,
 726                                 bool*          succeeded,
 727                                 GCCause::Cause gc_cause);
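Reviewer note: the protocol described in the comment above looks roughly like the following at call sites (a simplified paraphrase of the allocation slow paths, not code from this file; the lock-helper name has varied across JDK releases):

  uint gc_count_before;
  {
    MutexLocker ml(Heap_lock);                    // read the count under the lock...
    gc_count_before = g1h->total_collections();
  }                                               // ...and release it before the pause
  bool succeeded;
  HeapWord* result = g1h->do_collection_pause(word_size, gc_count_before,
                                              &succeeded,
                                              GCCause::_g1_inc_collection_pause);
  // The VM operation compares gc_count_before with the current count to detect
  // that another pause already completed in the meantime.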
 728 
 729   void wait_for_root_region_scanning();
 730 
 731   // The guts of the incremental collection pause, executed by the VM
 732   // thread. It returns false if it is unable to do the collection due
 733   // to the GC locker being active, true otherwise.
 734   bool do_collection_pause_at_safepoint(double target_pause_time_ms);
 735 
 736   G1HeapVerifier::G1VerifyType young_collection_verify_type() const;
 737   void verify_before_young_collection(G1HeapVerifier::G1VerifyType type);
 738   void verify_after_young_collection(G1HeapVerifier::G1VerifyType type);
 739 
 740   void calculate_collection_set(G1EvacuationInfo& evacuation_info, double target_pause_time_ms);
 741 
 742   // Actually do the work of evacuating the parts of the collection set.
 743   void evacuate_initial_collection_set(G1ParScanThreadStateSet* per_thread_states);
 744   void evacuate_optional_collection_set(G1ParScanThreadStateSet* per_thread_states);
 745   // Evacuate the next set of optional regions.
 746   void evacuate_next_optional_regions(G1ParScanThreadStateSet* per_thread_states);
 747 
 748   void pre_evacuate_collection_set(G1EvacuationInfo& evacuation_info);
 749   void post_evacuate_collection_set(G1EvacuationInfo& evacuation_info, G1ParScanThreadStateSet* pss);
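Reviewer note: with the 8218668 reorganization, the evacuation phases above compose roughly as follows inside the pause. This is a hedged paraphrase of the control flow, not the literal body of do_collection_pause_at_safepoint():

  calculate_collection_set(evacuation_info, target_pause_time_ms);
  pre_evacuate_collection_set(evacuation_info);
  evacuate_initial_collection_set(per_thread_states);   // mandatory regions
  // While optional candidate groups remain and the pause-time budget allows,
  // the optional phase invokes evacuate_next_optional_regions() per group.
  evacuate_optional_collection_set(per_thread_states);
  post_evacuate_collection_set(evacuation_info, per_thread_states);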
 750 
 751   void expand_heap_after_young_collection();
 752   // Update object copying statistics.
 753   void record_obj_copy_mem_stats();
 754 
 755   // The hot card cache for remembered set insertion optimization.
 756   G1HotCardCache* _hot_card_cache;
 757 
 758   // The g1 remembered set of the heap.
 759   G1RemSet* _rem_set;
 760 
 761   // A set of cards that cover the objects for which the RSets should be updated
 762   // concurrently after the collection.
 763   G1DirtyCardQueueSet _dirty_card_queue_set;
 764 
 765   // After a collection pause, convert the regions in the collection set into free
 766   // regions.


1409   virtual void print_gc_threads_on(outputStream* st) const;
1410   virtual void gc_threads_do(ThreadClosure* tc) const;
1411 
1412   // Override
1413   void print_tracing_info() const;
1414 
1415   // The following two methods are helpful for debugging RSet issues.
1416   void print_cset_rsets() PRODUCT_RETURN;
1417   void print_all_rsets() PRODUCT_RETURN;
1418 
1419   size_t pending_card_num();
1420 };
1421 
1422 class G1ParEvacuateFollowersClosure : public VoidClosure {
1423 private:
1424   double _start_term;
1425   double _term_time;
1426   size_t _term_attempts;
1427 
1428   void start_term_time() { _term_attempts++; _start_term = os::elapsedTime(); }
1429   void end_term_time() { _term_time += (os::elapsedTime() - _start_term); }
1430 protected:
1431   G1CollectedHeap*              _g1h;
1432   G1ParScanThreadState*         _par_scan_state;
1433   RefToScanQueueSet*            _queues;
1434   ParallelTaskTerminator*       _terminator;
1435   G1GCPhaseTimes::GCParPhases   _phase;
1436 
1437   G1ParScanThreadState*   par_scan_state() { return _par_scan_state; }
1438   RefToScanQueueSet*      queues()         { return _queues; }
1439   ParallelTaskTerminator* terminator()     { return _terminator; }
1440 
1441 public:
1442   G1ParEvacuateFollowersClosure(G1CollectedHeap* g1h,
1443                                 G1ParScanThreadState* par_scan_state,
1444                                 RefToScanQueueSet* queues,
1445                                 ParallelTaskTerminator* terminator,
1446                                 G1GCPhaseTimes::GCParPhases phase)
1447     : _start_term(0.0), _term_time(0.0), _term_attempts(0),
1448       _g1h(g1h), _par_scan_state(par_scan_state),
1449       _queues(queues), _terminator(terminator), _phase(phase) {}
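Reviewer note: for orientation, a hedged sketch of how this closure is typically driven from a worker task. Each worker wraps its own G1ParScanThreadState together with the shared queue set and terminator; do_void() then trims the local queue, steals from peers, and offers termination until all workers agree to finish. The enclosing task is hypothetical:

  void ExampleEvacuateTask::work(uint worker_id) {
    G1ParScanThreadState* pss = _per_thread_states->state_for_worker(worker_id);
    G1ParEvacuateFollowersClosure cl(_g1h, pss, _queues, &_terminator,
                                     G1GCPhaseTimes::ObjCopy);
    cl.do_void();  // drain, steal, and terminate cooperatively
  }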