
src/share/vm/gc/g1/g1CollectedHeap.hpp

rev 11545 : [mq]: 8159978-collection-set-as-array
rev 11546 : [mq]: 8159978-erikh-review


 761                                double term_ms,
 762                                size_t term_attempts,
 763                                size_t alloc_buffer_waste,
 764                                size_t undo_waste) const;
 765   // Update object copying statistics.
 766   void record_obj_copy_mem_stats();
 767 
 768   // The hot card cache for remembered set insertion optimization.
 769   G1HotCardCache* _hot_card_cache;
 770 
 771   // The g1 remembered set of the heap.
 772   G1RemSet* _g1_rem_set;
 773 
 774   // A set of cards that cover the objects for which the RSets should be updated
 775   // concurrently after the collection.
 776   DirtyCardQueueSet _dirty_card_queue_set;
 777 
 778   // The closure used to refine a single card.
 779   RefineCardTableEntryClosure* _refine_cte_cl;
 780 
 781   // After a collection pause, make the regions in the CS into free
 782   // regions.
 783   void free_collection_set(HeapRegion* cs_head, EvacuationInfo& evacuation_info, const size_t* surviving_young_words);
 784 
 785   // Abandon the current collection set without recording policy
 786   // statistics or updating free lists.
 787   void abandon_collection_set(HeapRegion* cs_head);
 788 
 789   // The concurrent marker (and the thread it runs in).
 790   G1ConcurrentMark* _cm;
 791   ConcurrentMarkThread* _cmThread;
 792 
 793   // The concurrent refiner.
 794   ConcurrentG1Refine* _cg1r;
 795 
 796   // The parallel task queues
 797   RefToScanQueueSet* _task_queues;
 798 
 799   // True iff an evacuation has failed in the current collection.
 800   bool _evacuation_failed;
 801 
 802   EvacuationFailedInfo* _evacuation_failed_info_array;
 803 
 804   // Failed evacuations cause some logical from-space objects to have
 805   // forwarding pointers to themselves.  Reset them.
 806   void remove_self_forwarding_pointers();
 807 


 913   // be processed.
 914   //
 915   // Instance of the is_alive closure for embedding into the
 916   // STW reference processor as the _is_alive_non_header field.
 917   // Supplying a value for the _is_alive_non_header field is
 918   // optional but doing so prevents unnecessary additions to
 919   // the discovered lists during reference discovery.
 920   G1STWIsAliveClosure _is_alive_closure_stw;
 921 
 922   // The (concurrent marking) reference processor...
 923   ReferenceProcessor* _ref_processor_cm;
 924 
 925   // Instance of the concurrent mark is_alive closure for embedding
 926   // into the Concurrent Marking reference processor as the
 927   // _is_alive_non_header field. Supplying a value for the
 928   // _is_alive_non_header field is optional but doing so prevents
 929   // unnecessary additions to the discovered lists during reference
 930   // discovery.
 931   G1CMIsAliveClosure _is_alive_closure_cm;
 932 
 933   // Cache used by G1CollectedHeap::start_cset_region_for_worker().
 934   HeapRegion** _worker_cset_start_region;
 935 
 936   // Time stamp to validate the regions recorded in the cache
 937   // used by G1CollectedHeap::start_cset_region_for_worker().
 938   // The heap region entry for a given worker is valid iff
 939   // the associated time stamp value matches the current value
 940   // of G1CollectedHeap::_gc_time_stamp.
 941   uint* _worker_cset_start_region_time_stamp;
 942 
 943   volatile bool _free_regions_coming;
 944 
 945 public:
 946 
 947   void set_refine_cte_cl_concurrency(bool concurrent);
 948 
 949   RefToScanQueue* task_queue(uint i) const;
 950 
 951   uint num_task_queues() const;
 952 
 953   // A set of cards where updates happened during the GC
 954   DirtyCardQueueSet& dirty_card_queue_set() { return _dirty_card_queue_set; }
 955 
 956   // Create a G1CollectedHeap with the specified policy.
 957   // Must call the initialize method afterwards.
 958   // May not return if something goes wrong.
 959   G1CollectedHeap(G1CollectorPolicy* policy);
 960 
 961   // Initialize the G1CollectedHeap to have the initial and
 962   // maximum sizes and remembered and barrier sets


1194 
1195   // Calculate the region index of the given address. Given address must be
1196   // within the heap.
1197   inline uint addr_to_region(HeapWord* addr) const;
1198 
1199   inline HeapWord* bottom_addr_for_region(uint index) const;
1200 
1201   // Iterate over the heap regions in parallel. Assumes that this will be called
1202   // in parallel by ParallelGCThreads worker threads with distinct worker ids
1203   // in the range [0..max(ParallelGCThreads-1, 1)]. Applies "cl->doHeapRegion"
1204   // to each of the regions, by attempting to claim the region using the
1205   // HeapRegionClaimer and, if successful, applying the closure to the claimed
1206   // region. The concurrent argument should be set to true if the iteration is
1207   // performed concurrently; in that case no assumptions are made about heap
1208   // region attributes staying consistent (they might be modified while iterating).
1209   void heap_region_par_iterate(HeapRegionClosure* cl,
1210                                uint worker_id,
1211                                HeapRegionClaimer* hrclaimer,
1212                                bool concurrent = false) const;
1213 
1214   // Clear the cached cset start regions and (more importantly)
1215   // the time stamps. Called when we reset the GC time stamp.
1216   void clear_cset_start_regions();
1217 
1218   // Given the id of a worker, obtain or calculate a suitable
1219   // starting region for iterating over the current collection set.
1220   HeapRegion* start_cset_region_for_worker(uint worker_i);
1221 
1222   // Iterate over the regions (if any) in the current collection set.
1223   void collection_set_iterate(HeapRegionClosure* blk);
1224 
1225   // As above, but starting from region r.
1226   void collection_set_iterate_from(HeapRegion* r, HeapRegionClosure* blk);
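A minimal sketch (not the actual G1ParTask code; the closure name is hypothetical) of how a parallel GC worker combines the two calls above under this old scheme: each worker fetches its own cached start region, then walks the whole collection set from there, wrapping around.

  class CountCSetRegionsClosure : public HeapRegionClosure {
    size_t _regions;
  public:
    CountCSetRegionsClosure() : _regions(0) { }
    bool doHeapRegion(HeapRegion* r) {
      _regions++;       // per-region work; returning false keeps iterating
      return false;
    }
    size_t regions() const { return _regions; }
  };

  void worker_scan_cset(G1CollectedHeap* g1h, uint worker_id) {
    CountCSetRegionsClosure cl;
    // Each worker starts at a different cached region so the work is spread out.
    HeapRegion* start = g1h->start_cset_region_for_worker(worker_id);
    g1h->collection_set_iterate_from(start, &cl);
  }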



1227 
1228   HeapRegion* next_compaction_region(const HeapRegion* from) const;
1229 
1230   // Returns the HeapRegion that contains addr. addr must not be NULL.
1231   template <class T>
1232   inline HeapRegion* heap_region_containing(const T addr) const;
1233 
1234   // A CollectedHeap is divided into a dense sequence of "blocks"; that is,
1235   // each address in the (reserved) heap is a member of exactly
1236   // one block.  The defining characteristic of a block is that it is
1237   // possible to find its size, and thus to progress forward to the next
1238   // block.  (Blocks may be of different sizes.)  Thus, blocks may
1239   // represent Java objects, or they might be free blocks in a
1240   // free-list-based heap (or subheap), as long as the two kinds are
1241   // distinguishable and the size of each is determinable.
1242 
1243   // Returns the address of the start of the "block" that contains the
1244   // address "addr".  We say "blocks" instead of "objects" since some heaps
1245   // may not pack objects densely; a chunk may either be an object or a
1246   // non-object.




 761                                double term_ms,
 762                                size_t term_attempts,
 763                                size_t alloc_buffer_waste,
 764                                size_t undo_waste) const;
 765   // Update object copying statistics.
 766   void record_obj_copy_mem_stats();
 767 
 768   // The hot card cache for remembered set insertion optimization.
 769   G1HotCardCache* _hot_card_cache;
 770 
 771   // The g1 remembered set of the heap.
 772   G1RemSet* _g1_rem_set;
 773 
 774   // A set of cards that cover the objects for which the RSets should be updated
 775   // concurrently after the collection.
 776   DirtyCardQueueSet _dirty_card_queue_set;
 777 
 778   // The closure used to refine a single card.
 779   RefineCardTableEntryClosure* _refine_cte_cl;
 780 
 781   // After a collection pause, convert the regions in the collection set into free
 782   // regions.
 783   void free_collection_set(G1CollectionSet* collection_set, EvacuationInfo& evacuation_info, const size_t* surviving_young_words);
 784 
 785   // Abandon the current collection set without recording policy
 786   // statistics or updating free lists.
 787   void abandon_collection_set(G1CollectionSet* collection_set);
 788 
 789   // The concurrent marker (and the thread it runs in).
 790   G1ConcurrentMark* _cm;
 791   ConcurrentMarkThread* _cmThread;
 792 
 793   // The concurrent refiner.
 794   ConcurrentG1Refine* _cg1r;
 795 
 796   // The parallel task queues
 797   RefToScanQueueSet* _task_queues;
 798 
 799   // True iff an evacuation has failed in the current collection.
 800   bool _evacuation_failed;
 801 
 802   EvacuationFailedInfo* _evacuation_failed_info_array;
 803 
 804   // Failed evacuations cause some logical from-space objects to have
 805   // forwarding pointers to themselves.  Reset them.
 806   void remove_self_forwarding_pointers();
 807 


 913   // be processed.
 914   //
 915   // Instance of the is_alive closure for embedding into the
 916   // STW reference processor as the _is_alive_non_header field.
 917   // Supplying a value for the _is_alive_non_header field is
 918   // optional but doing so prevents unnecessary additions to
 919   // the discovered lists during reference discovery.
 920   G1STWIsAliveClosure _is_alive_closure_stw;
 921 
 922   // The (concurrent marking) reference processor...
 923   ReferenceProcessor* _ref_processor_cm;
 924 
 925   // Instance of the concurrent mark is_alive closure for embedding
 926   // into the Concurrent Marking reference processor as the
 927   // _is_alive_non_header field. Supplying a value for the
 928   // _is_alive_non_header field is optional but doing so prevents
 929   // unnecessary additions to the discovered lists during reference
 930   // discovery.
 931   G1CMIsAliveClosure _is_alive_closure_cm;
 932 
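A minimal sketch of the filtering the comment above describes (illustrative only, not the actual ReferenceProcessor code): during discovery, a non-NULL _is_alive_non_header closure lets the reference processor skip references whose referents are already known to be alive.

  bool should_discover(oop referent, BoolObjectClosure* is_alive_non_header) {
    if (is_alive_non_header != NULL && is_alive_non_header->do_object_b(referent)) {
      return false;  // referent is alive; no point adding the Reference to a discovered list
    }
    return true;     // otherwise discover the Reference for later processing
  }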










 933   volatile bool _free_regions_coming;
 934 
 935 public:
 936 
 937   void set_refine_cte_cl_concurrency(bool concurrent);
 938 
 939   RefToScanQueue* task_queue(uint i) const;
 940 
 941   uint num_task_queues() const;
 942 
 943   // A set of cards where updates happened during the GC
 944   DirtyCardQueueSet& dirty_card_queue_set() { return _dirty_card_queue_set; }
 945 
 946   // Create a G1CollectedHeap with the specified policy.
 947   // Must call the initialize method afterwards.
 948   // May not return if something goes wrong.
 949   G1CollectedHeap(G1CollectorPolicy* policy);
 950 
 951   // Initialize the G1CollectedHeap to have the initial and
 952   // maximum sizes and remembered and barrier sets


1184 
1185   // Calculate the region index of the given address. Given address must be
1186   // within the heap.
1187   inline uint addr_to_region(HeapWord* addr) const;
1188 
1189   inline HeapWord* bottom_addr_for_region(uint index) const;
1190 
1191   // Iterate over the heap regions in parallel. Assumes that this will be called
1192   // in parallel by ParallelGCThreads worker threads with distinct worker ids
1193   // in the range [0..max(ParallelGCThreads-1, 1)]. Applies "cl->doHeapRegion"
1194   // to each of the regions, by attempting to claim the region using the
1195   // HeapRegionClaimer and, if successful, applying the closure to the claimed
1196   // region. The concurrent argument should be set to true if the iteration is
1197   // performed concurrently; in that case no assumptions are made about heap
1198   // region attributes staying consistent (they might be modified while iterating).
1199   void heap_region_par_iterate(HeapRegionClosure* cl,
1200                                uint worker_id,
1201                                HeapRegionClaimer* hrclaimer,
1202                                bool concurrent = false) const;
1203 
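A minimal sketch of driving this iterator from a worker gang, following the AbstractGangTask/HeapRegionClaimer pattern used elsewhere in G1 (task and closure names are hypothetical):

  class TouchRegionClosure : public HeapRegionClosure {
  public:
    bool doHeapRegion(HeapRegion* r) {
      // ... per-region work goes here ...
      return false;  // false means "keep iterating"
    }
  };

  class G1ParIterateTask : public AbstractGangTask {
    G1CollectedHeap* _g1h;
    HeapRegionClaimer _hrclaimer;  // shared claimer; each region is claimed once
  public:
    G1ParIterateTask(G1CollectedHeap* g1h, uint n_workers) :
      AbstractGangTask("Sketch: Par Region Iterate"),
      _g1h(g1h), _hrclaimer(n_workers) { }
    void work(uint worker_id) {
      TouchRegionClosure cl;
      // Each worker claims regions through the shared claimer, so every
      // region is visited by exactly one worker.
      _g1h->heap_region_par_iterate(&cl, worker_id, &_hrclaimer);
    }
  };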








1204   // Iterate over the regions (if any) in the current collection set.
1205   void collection_set_iterate(HeapRegionClosure* blk);
1206 
1207   // Iterate over the regions (if any) in the current collection set. Starts the
1208   // iteration at an offset determined by the given worker id, so that the start
1209   // regions of the active_workers workers are spread evenly across the
1210   // collection set regions.
1211   void collection_set_iterate_from(HeapRegionClosure* blk, uint worker_id);
1212 
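A minimal sketch of the new calling convention: each worker now passes only its worker id, and the iterator computes an evenly spaced start offset internally, so callers no longer need the old start_cset_region_for_worker() cache.

  void worker_scan_cset(G1CollectedHeap* g1h, uint worker_id) {
    TouchRegionClosure cl;  // any HeapRegionClosure, as sketched above
    g1h->collection_set_iterate_from(&cl, worker_id);
  }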
1213   HeapRegion* next_compaction_region(const HeapRegion* from) const;
1214 
1215   // Returns the HeapRegion that contains addr. addr must not be NULL.
1216   template <class T>
1217   inline HeapRegion* heap_region_containing(const T addr) const;
1218 
1219   // A CollectedHeap is divided into a dense sequence of "blocks"; that is,
1220   // each address in the (reserved) heap is a member of exactly
1221   // one block.  The defining characteristic of a block is that it is
1222   // possible to find its size, and thus to progress forward to the next
1223   // block.  (Blocks may be of different sizes.)  Thus, blocks may
1224   // represent Java objects, or they might be free blocks in a
1225   // free-list-based heap (or subheap), as long as the two kinds are
1226   // distinguishable and the size of each is determinable.
1227 
1228   // Returns the address of the start of the "block" that contains the
1229   // address "addr".  We say "blocks" instead of "objects" since some heaps
1230   // may not pack objects densely; a chunk may either be an object or a
1231   // non-object.
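A minimal sketch of walking a span of the heap with the block protocol described above, assuming the standard CollectedHeap queries block_start(), block_size() and block_is_obj():

  void walk_blocks(CollectedHeap* heap, HeapWord* bottom, HeapWord* top) {
    HeapWord* cur = heap->block_start(bottom);  // snap to the enclosing block
    while (cur < top) {
      size_t words = heap->block_size(cur);     // every block knows its size ...
      if (heap->block_is_obj(cur)) {
        // ... so objects and free chunks can be told apart ...
      }
      cur += words;                             // ... and we can step to the next block
    }
  }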

