src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp

rev 2891 : 7119908: G1: Cache CSet start region for each worker for subsequent reuse
Summary: Cache workers' calculated starting heap region, used for parallel iteration over the collection set, for subsequent reuse.
Reviewed-by: tonyp, brutisso

--- old/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp

 926   // be processed.
 927   //
 928   // Instance of the is_alive closure for embedding into the
 929   // STW reference processor as the _is_alive_non_header field.
 930   // Supplying a value for the _is_alive_non_header field is
 931   // optional but doing so prevents unnecessary additions to
 932   // the discovered lists during reference discovery.
 933   G1STWIsAliveClosure _is_alive_closure_stw;
 934 
 935   // The (concurrent marking) reference processor...
 936   ReferenceProcessor* _ref_processor_cm;
 937 
 938   // Instance of the concurrent mark is_alive closure for embedding
 939   // into the Concurrent Marking reference processor as the
 940   // _is_alive_non_header field. Supplying a value for the
 941   // _is_alive_non_header field is optional but doing so prevents
 942   // unnecessary additions to the discovered lists during reference
 943   // discovery.
 944   G1CMIsAliveClosure _is_alive_closure_cm;
 945 
 946   enum G1H_process_strong_roots_tasks {
 947     G1H_PS_mark_stack_oops_do,
 948     G1H_PS_refProcessor_oops_do,
 949     // Leave this one last.
 950     G1H_PS_NumElements
 951   };
 952 
 953   SubTasksDone* _process_strong_tasks;
 954 
 955   volatile bool _free_regions_coming;
 956 
 957 public:
 958 
 959   SubTasksDone* process_strong_tasks() { return _process_strong_tasks; }
 960 
 961   void set_refine_cte_cl_concurrency(bool concurrent);
 962 
 963   RefToScanQueue *task_queue(int i) const;
 964 
 965   // A set of cards where updates happened during the GC


1013   // The rem set and barrier set.
1014   G1RemSet* g1_rem_set() const { return _g1_rem_set; }
1015   ModRefBarrierSet* mr_bs() const { return _mr_bs; }
1016 
1017   // The rem set iterator.
1018   HeapRegionRemSetIterator* rem_set_iterator(int i) {
1019     return _rem_set_iterator[i];
1020   }
1021 
1022   HeapRegionRemSetIterator* rem_set_iterator() {
1023     return _rem_set_iterator[0];
1024   }
1025 
1026   unsigned get_gc_time_stamp() {
1027     return _gc_time_stamp;
1028   }
1029 
1030   void reset_gc_time_stamp() {
1031     _gc_time_stamp = 0;
1032     OrderAccess::fence();
1033   }
1034 
1035   void increment_gc_time_stamp() {
1036     ++_gc_time_stamp;
1037     OrderAccess::fence();
1038   }
1039 
1040   void iterate_dirty_card_closure(CardTableEntryClosure* cl,
1041                                   DirtyCardQueue* into_cset_dcq,
1042                                   bool concurrent, int worker_i);
1043 
1044   // The shared block offset table array.
1045   G1BlockOffsetSharedArray* bot_shared() const { return _bot_shared; }
1046 
1047   // Reference Processing accessors
1048 
1049   // The STW reference processor....
1050   ReferenceProcessor* ref_processor_stw() const { return _ref_processor_stw; }
1051 
1052   // The Concurrent Marking reference processor...


1283   // successful, applying the closure to each region in the chunk (and
1284   // setting the claim value of the second and subsequent regions of the
1285   // chunk.)  For now requires that "doHeapRegion" always returns "false",
1286   // i.e., that a closure never attempt to abort a traversal.
1287   void heap_region_par_iterate_chunked(HeapRegionClosure* blk,
1288                                        int worker,
1289                                        int no_of_par_workers,
1290                                        jint claim_value);
1291 
1292   // It resets all the region claim values to the default.
1293   void reset_heap_region_claim_values();
1294 
1295 #ifdef ASSERT
1296   bool check_heap_region_claim_values(jint claim_value);
1297 
1298   // Same as the routine above but only checks regions in the
1299   // current collection set.
1300   bool check_cset_heap_region_claim_values(jint claim_value);
1301 #endif // ASSERT
1302 
1303   // Given the id of a worker, calculate a suitable
1304   // starting region for iterating over the current
1305   // collection set.
1306   HeapRegion* start_cset_region_for_worker(int worker_i);
1307 
1308   // Iterate over the regions (if any) in the current collection set.
1309   void collection_set_iterate(HeapRegionClosure* blk);
1310 
1311   // As above but starting from region r
1312   void collection_set_iterate_from(HeapRegion* r, HeapRegionClosure *blk);
1313 
1314   // Returns the first (lowest address) compactible space in the heap.
1315   virtual CompactibleSpace* first_compactible_space();
1316 
1317   // A CollectedHeap will contain some number of spaces.  This finds the
1318   // space containing a given address, or else returns NULL.
1319   virtual Space* space_containing(const void* addr) const;
1320 
1321   // A G1CollectedHeap will contain some number of heap regions.  This
1322   // finds the region containing a given address, or else returns NULL.
1323   template <class T>
1324   inline HeapRegion* heap_region_containing(const T addr) const;
1325 

+++ new/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp

 926   // be processed.
 927   //
 928   // Instance of the is_alive closure for embedding into the
 929   // STW reference processor as the _is_alive_non_header field.
 930   // Supplying a value for the _is_alive_non_header field is
 931   // optional but doing so prevents unnecessary additions to
 932   // the discovered lists during reference discovery.
 933   G1STWIsAliveClosure _is_alive_closure_stw;
 934 
 935   // The (concurrent marking) reference processor...
 936   ReferenceProcessor* _ref_processor_cm;
 937 
 938   // Instance of the concurrent mark is_alive closure for embedding
 939   // into the Concurrent Marking reference processor as the
 940   // _is_alive_non_header field. Supplying a value for the
 941   // _is_alive_non_header field is optional but doing so prevents
 942   // unnecessary additions to the discovered lists during reference
 943   // discovery.
 944   G1CMIsAliveClosure _is_alive_closure_cm;
 945 
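The role of an _is_alive_non_header closure described above can be shown with a minimal sketch. should_discover() is hypothetical; do_object_b() is the standard BoolObjectClosure query that both closure types above implement.

  // Illustration only: a reference whose referent the closure already
  // reports as live need not be added to a discovered list, since
  // reference processing would discard that entry later anyway.
  bool should_discover(BoolObjectClosure* is_alive_non_header, oop referent) {
    if (is_alive_non_header != NULL &&
        is_alive_non_header->do_object_b(referent)) {
      return false;  // referent is live; skip discovery
    }
    return true;     // referent may be dead; discover the reference
  }
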
 946   // Cache used by G1CollectedHeap::start_cset_region_for_worker().
 947   HeapRegion** _worker_cset_start_region;
 948 
 949   // Time stamp to validate the regions recorded in the cache
 950   // used by G1CollectedHeap::start_cset_region_for_worker().
 951   // The heap region entry for a given worker is valid iff
 952   // the associated time stamp value matches the current value
 953   // of G1CollectedHeap::_gc_time_stamp.
 954   unsigned int* _worker_cset_start_region_time_stamp;
 955 
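How the two arrays above cooperate can be sketched as follows. cached_cset_start_region() is a hypothetical helper, not the real body of start_cset_region_for_worker(); the field names are the ones declared above.

  // Sketch of the validity test: an entry recorded for worker_i is
  // usable iff its time stamp matches the current _gc_time_stamp.
  // Assumes lookups happen only during a pause, i.e. after
  // increment_gc_time_stamp(), so a cleared stamp of 0 never matches.
  HeapRegion* G1CollectedHeap::cached_cset_start_region(int worker_i) {
    HeapRegion* result = _worker_cset_start_region[worker_i];
    if (result != NULL &&
        _worker_cset_start_region_time_stamp[worker_i] == _gc_time_stamp) {
      return result;   // recorded during the current pause
    }
    return NULL;       // stale or never set; caller must recalculate
  }
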
 956   enum G1H_process_strong_roots_tasks {
 957     G1H_PS_mark_stack_oops_do,
 958     G1H_PS_refProcessor_oops_do,
 959     // Leave this one last.
 960     G1H_PS_NumElements
 961   };
 962 
 963   SubTasksDone* _process_strong_tasks;
 964 
 965   volatile bool _free_regions_coming;
 966 
 967 public:
 968 
 969   SubTasksDone* process_strong_tasks() { return _process_strong_tasks; }
 970 
 971   void set_refine_cte_cl_concurrency(bool concurrent);
 972 
 973   RefToScanQueue *task_queue(int i) const;
 974 
 975   // A set of cards where updates happened during the GC


1023   // The rem set and barrier set.
1024   G1RemSet* g1_rem_set() const { return _g1_rem_set; }
1025   ModRefBarrierSet* mr_bs() const { return _mr_bs; }
1026 
1027   // The rem set iterator.
1028   HeapRegionRemSetIterator* rem_set_iterator(int i) {
1029     return _rem_set_iterator[i];
1030   }
1031 
1032   HeapRegionRemSetIterator* rem_set_iterator() {
1033     return _rem_set_iterator[0];
1034   }
1035 
1036   unsigned get_gc_time_stamp() {
1037     return _gc_time_stamp;
1038   }
1039 
1040   void reset_gc_time_stamp() {
1041     _gc_time_stamp = 0;
1042     OrderAccess::fence();
1043     // Clear the cached CSet starting regions and time stamps.
1044     // Their validity is dependent on the GC timestamp.
1045     clear_cset_start_regions();
1046   }
1047 
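clear_cset_start_regions() is declared further down in this file; a minimal sketch of the clearing it performs, assuming a hypothetical array length n_entries, might look like this:

  // Sketch only: invalidate every worker's cached entry. Zeroing the
  // recorded time stamps suffices, because a valid entry must match a
  // _gc_time_stamp that is non-zero during a pause; the region
  // pointers are cleared as well so stale values are never observed.
  void G1CollectedHeap::clear_cset_start_regions() {
    for (int i = 0; i < n_entries; i++) {  // n_entries: assumed bound
      _worker_cset_start_region[i] = NULL;
      _worker_cset_start_region_time_stamp[i] = 0;
    }
  }
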
1048   void increment_gc_time_stamp() {
1049     ++_gc_time_stamp;
1050     OrderAccess::fence();
1051   }
1052 
1053   void iterate_dirty_card_closure(CardTableEntryClosure* cl,
1054                                   DirtyCardQueue* into_cset_dcq,
1055                                   bool concurrent, int worker_i);
1056 
1057   // The shared block offset table array.
1058   G1BlockOffsetSharedArray* bot_shared() const { return _bot_shared; }
1059 
1060   // Reference Processing accessors
1061 
1062   // The STW reference processor....
1063   ReferenceProcessor* ref_processor_stw() const { return _ref_processor_stw; }
1064 
1065   // The Concurrent Marking reference processor...


1296   // successful, applying the closure to each region in the chunk (and
1297   // setting the claim value of the second and subsequent regions of the
1298   // chunk.)  For now requires that "doHeapRegion" always returns "false",
1299   // i.e., that a closure never attempt to abort a traversal.
1300   void heap_region_par_iterate_chunked(HeapRegionClosure* blk,
1301                                        int worker,
1302                                        int no_of_par_workers,
1303                                        jint claim_value);
1304 
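A hedged example of a conforming closure for this interface; the counting closure is illustrative, while HeapRegionClosure and the doHeapRegion() contract come from the comment above.

  // Illustrative closure: counts the regions covered by the chunks
  // this worker claims. Per the contract above, doHeapRegion() must
  // always return false, because chunked traversal cannot be aborted.
  class CountRegionsClosure : public HeapRegionClosure {
    size_t _count;
  public:
    CountRegionsClosure() : _count(0) { }
    bool doHeapRegion(HeapRegion* r) {
      _count += 1;
      return false;  // never abort
    }
    size_t count() const { return _count; }
  };

Worker i of n might then call, with a claim value distinct from the regions' current one:

  CountRegionsClosure cl;
  g1h->heap_region_par_iterate_chunked(&cl, i, n, claim_value);
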
1305   // It resets all the region claim values to the default.
1306   void reset_heap_region_claim_values();
1307 
1308 #ifdef ASSERT
1309   bool check_heap_region_claim_values(jint claim_value);
1310 
1311   // Same as the routine above but only checks regions in the
1312   // current collection set.
1313   bool check_cset_heap_region_claim_values(jint claim_value);
1314 #endif // ASSERT
1315 
1316   // Clear the cached cset start regions and (more importantly)
1317   // the time stamps. Called when we reset the GC time stamp.
1318   void clear_cset_start_regions();
1319 
1320   // Given the id of a worker, obtain or calculate a suitable
1321   // starting region for iterating over the current collection set.
1322   HeapRegion* start_cset_region_for_worker(int worker_i);
1323 
1324   // Iterate over the regions (if any) in the current collection set.
1325   void collection_set_iterate(HeapRegionClosure* blk);
1326 
1327   // As above but starting from region r
1328   void collection_set_iterate_from(HeapRegion* r, HeapRegionClosure *blk);
1329 
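Putting the two calls together, per-worker cset iteration might be driven as in this sketch; iterate_cset_in_worker() is hypothetical.

  // Sketch: each worker obtains a (possibly cached) starting region
  // and walks the collection set from there. Distinct starting points
  // per worker spread the regions across the worker threads.
  void iterate_cset_in_worker(G1CollectedHeap* g1h, int worker_i,
                              HeapRegionClosure* cl) {
    HeapRegion* start = g1h->start_cset_region_for_worker(worker_i);
    g1h->collection_set_iterate_from(start, cl);
  }
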
1330   // Returns the first (lowest address) compactible space in the heap.
1331   virtual CompactibleSpace* first_compactible_space();
1332 
1333   // A CollectedHeap will contain some number of spaces.  This finds the
1334   // space containing a given address, or else returns NULL.
1335   virtual Space* space_containing(const void* addr) const;
1336 
1337   // A G1CollectedHeap will contain some number of heap regions.  This
1338   // finds the region containing a given address, or else returns NULL.
1339   template <class T>
1340   inline HeapRegion* heap_region_containing(const T addr) const;
1341
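
A closing hedged example of the template above; is_obj_in_cset() is hypothetical, and in_collection_set() is assumed to be the usual HeapRegion query.

  // Illustration only: map an oop to its enclosing region, then query it.
  bool is_obj_in_cset(G1CollectedHeap* g1h, oop obj) {
    HeapRegion* hr = g1h->heap_region_containing(obj);
    return hr != NULL && hr->in_collection_set();
  }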