
src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp

rev 7471 : 8060025: Object copy time regressions after JDK-8031323 and JDK-8057536
Summary: Evaluate and improve object copy time by micro-optimizations and splitting out slow and fast paths aggressively.
Reviewed-by:
Contributed-by: Tony Printezis <tprintezis@twitter.com>, Thomas Schatzl <thomas.schatzl@oracle.com>
rev 7472 : [mq]: 8060025-mikael-review1


  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP
  26 #define SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP
  27 
  28 #include "gc_implementation/g1/g1AllocationContext.hpp"
  29 #include "gc_implementation/g1/g1Allocator.hpp"
  30 #include "gc_implementation/g1/concurrentMark.hpp"
  31 #include "gc_implementation/g1/evacuationInfo.hpp"
  32 #include "gc_implementation/g1/g1AllocRegion.hpp"
  33 #include "gc_implementation/g1/g1BiasedArray.hpp"
  34 #include "gc_implementation/g1/g1HRPrinter.hpp"
  35 #include "gc_implementation/g1/g1MonitoringSupport.hpp"
  36 #include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
  37 #include "gc_implementation/g1/g1YCTypes.hpp"
  38 #include "gc_implementation/g1/heapRegionManager.hpp"
  39 #include "gc_implementation/g1/heapRegionSet.hpp"
  40 #include "gc_implementation/shared/hSpaceCounters.hpp"
  41 #include "gc_implementation/shared/parGCAllocBuffer.hpp"
  42 #include "memory/barrierSet.hpp"
  43 #include "memory/memRegion.hpp"
  44 #include "memory/sharedHeap.hpp"
  45 #include "utilities/stack.hpp"
  46 
  47 // A "G1CollectedHeap" is an implementation of a Java heap for HotSpot.
  48 // It uses the "Garbage First" heap organization and algorithm, which
  49 // may combine concurrent marking with parallel, incremental compaction of
  50 // heap subsets that will yield large amounts of garbage.
  51 
  52 // Forward declarations
  53 class HeapRegion;
  54 class HRRSCleanupTask;


 530                                          int* gclocker_retry_count_ret);
 531 
 532   // Allocation attempt that should be called during safepoints (e.g.,
 533   // at the end of a successful GC). expect_null_mutator_alloc_region
 534   // specifies whether the mutator alloc region is expected to be NULL
 535   // or not.
 536   HeapWord* attempt_allocation_at_safepoint(size_t word_size,
 537                                             AllocationContext_t context,
 538                                             bool expect_null_mutator_alloc_region);
 539 
 540   // It dirties the cards that cover the block so that the post
 541   // write barrier never queues anything when updating objects on this
 542   // block. It is assumed (and in fact we assert) that the block
 543   // belongs to a young region.
 544   inline void dirty_young_block(HeapWord* start, size_t word_size);
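
"Dirtying" here means marking every card spanned by [start, start + word_size)
as dirty, so the post write barrier's card filter short-circuits for stores
into this young block. A minimal standalone sketch of that arithmetic,
assuming the usual 512-byte cards, 8-byte words and a flat card byte map
(none of these details are taken from this header):

    #include <stddef.h>
    #include <stdint.h>

    static const int CARD_SHIFT = 9;   // assumed 512-byte cards
    static const int WORD_SIZE  = 8;   // assumed 64-bit HeapWords

    // Sketch only: real G1 goes through G1SATBCardTableModRefBS
    // with a biased base pointer rather than a flat array.
    void dirty_young_block_sketch(volatile unsigned char* byte_map,
                                  uintptr_t start, size_t word_size) {
      uintptr_t end_incl = start + word_size * WORD_SIZE - 1;  // last byte
      for (uintptr_t c = start >> CARD_SHIFT; c <= end_incl >> CARD_SHIFT; c++) {
        byte_map[c] = 0;  // 0 is the conventional dirty-card value
      }
    }
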
 545 
 546   // Allocate blocks during garbage collection. Will ensure an
 547   // allocation region, either by picking one or expanding the
 548   // heap, and then allocate a block of the given size. The block
 549   // may not be humongous - it must fit into a single heap region.
 550   HeapWord* par_allocate_during_gc(in_cset_state_t dest,
 551                                    size_t word_size,
 552                                    AllocationContext_t context) {
 553     switch (dest) {
 554       case InCSetState::Young:
 555         return survivor_attempt_allocation(word_size, context);
 556       case InCSetState::Old:
 557         return old_attempt_allocation(word_size, context);
 558       default:
 559         assert(false, err_msg("Unknown dest: %d", dest));
 560         break;
 561     }
 562     // keep some compilers happy
 563     return NULL;
 564   }
 565 
 566   // Ensure that no further allocations can happen in "r", bearing in mind
 567   // that parallel threads might be attempting allocations.
 568   void par_allocate_remaining_space(HeapRegion* r);
 569 
 570   // Allocation attempt during GC for a survivor object / PLAB.
 571   inline HeapWord* survivor_attempt_allocation(size_t word_size,
 572                                                AllocationContext_t context);
 573 
 574   // Allocation attempt during GC for an old object / PLAB.
 575   inline HeapWord* old_attempt_allocation(size_t word_size,
 576                                           AllocationContext_t context);
 577 
 578   // These methods are the "callbacks" from the G1AllocRegion class.
 579 
 580   // For mutator alloc regions.
 581   HeapRegion* new_mutator_alloc_region(size_t word_size, bool force);
 582   void retire_mutator_alloc_region(HeapRegion* alloc_region,
 583                                    size_t allocated_bytes);
 584 
 585   // For GC alloc regions.


 631   void enqueue_discovered_references(uint no_of_gc_workers);
 632 
 633 public:
 634 
 635   G1Allocator* allocator() {
 636     return _allocator;
 637   }
 638 
 639   G1MonitoringSupport* g1mm() {
 640     assert(_g1mm != NULL, "should have been initialized");
 641     return _g1mm;
 642   }
 643 
 644   // Expand the garbage-first heap by at least the given size (in bytes!).
 645   // Returns true if the heap was expanded by the requested amount;
 646   // false otherwise.
 647   // (Rounds up to a HeapRegion boundary.)
 648   bool expand(size_t expand_bytes);
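
The parenthetical means the byte request is first rounded up to a whole
number of regions before committing. A standalone sketch of that rounding,
with region_bytes standing in for HeapRegion::GrainBytes (the helper name is
illustrative only):

    #include <stddef.h>

    // E.g. a 3 MB request with 2 MB regions -> 2 regions -> 4 MB committed.
    size_t round_up_to_regions(size_t expand_bytes, size_t region_bytes) {
      size_t regions = (expand_bytes + region_bytes - 1) / region_bytes;
      return regions * region_bytes;
    }
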
 649 
 650   // Returns the PLAB statistics for a given destination.
 651   PLABStats* alloc_buffer_stats(in_cset_state_t dest) {
 652     switch (dest) {
 653       case InCSetState::Young:
 654         return &_survivor_plab_stats;
 655       case InCSetState::Old:
 656         return &_old_plab_stats;
 657       default:
 658         assert(false, err_msg("unknown dest: %d", dest));
 659         break;
 660     }
 661     // keep some compilers happy
 662     return NULL;
 663   }
 664 
 665   // Determines PLAB size for a given destination.
 666   size_t desired_plab_sz(in_cset_state_t dest) {
 667     size_t gclab_word_size = 0;
 668     switch (dest) {
 669       case InCSetState::Young:
 670         gclab_word_size = _survivor_plab_stats.desired_plab_sz();
 671         break;
 672       case InCSetState::Old:
 673         gclab_word_size = _old_plab_stats.desired_plab_sz();
 674         break;
 675       default:
 676         assert(false, err_msg("Unknown dest: %d", dest));
 677         break;
 678     }
 679 
 680     // Prevent humongous PLAB sizes for two reasons:
 681   // * PLABs are allocated using a similar path to oops, but should
 682     //   never be in a humongous region
 683     // * Allowing humongous PLABs needlessly churns the region free lists
 684     return MIN2(_humongous_object_threshold_in_words, gclab_word_size);
 685   }
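
As a worked example of the clamp (G1's humongous threshold is half a region;
the concrete sizes below are illustrative, not from this header): with 1 MB
regions and 8-byte words a region holds 131072 words, so the threshold is
65536 words, and MIN2 behaves as follows:

    size_t region_words = (1024 * 1024) / 8;         // 131072 words per region
    size_t threshold    = region_words / 2;          // 65536, humongous limit
    size_t plab1 = MIN2(threshold, (size_t)100000);  // clamped down to 65536
    size_t plab2 = MIN2(threshold, (size_t)20000);   // unaffected, stays 20000
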
 686 
 687   inline AllocationContextStats& allocation_context_stats();
 688 
 689   // Do anything common to GCs.
 690   virtual void gc_prologue(bool full);
 691   virtual void gc_epilogue(bool full);
 692 
 693   inline void set_humongous_is_live(oop obj);
 694 
 695   bool humongous_is_live(uint region) {
 696     return _humongous_is_live.is_live(region);
 697   }
 698 
 699   // Returns whether the given region (which must be a humongous (start) region)
 700   // is to be considered conservatively live regardless of any other conditions.
 701   bool humongous_region_is_always_live(uint index);
 702   // Register the given region to be part of the collection set.
 703   inline void register_humongous_region_with_in_cset_fast_test(uint index);
 704   // Register regions with humongous objects (actually on the start region) in
 705   // the in_cset_fast_test table.
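
A hedged guess at the inline body of register_humongous_region_with_in_cset_fast_test
above, based on the fast-test array defined further down in this file (only
the declaration is actually shown here):

    // Presumed shape: flag the humongous start region in the fast-test
    // array so is_in_cset_or_humongous() succeeds for it, without putting
    // the region on any collection set list.
    inline void
    G1CollectedHeap::register_humongous_region_with_in_cset_fast_test(uint index) {
      _in_cset_fast_test.set_humongous(index);
    }
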


1191   bool verify_no_bits_over_tams(const char* bitmap_name, CMBitMapRO* bitmap,
1192                                 HeapWord* from, HeapWord* limit);
1193 
1194   // Verify that the prev / next bitmap range [tams,end) for the given
1195   // region has no marks. Return true if all is well, false if errors
1196   // are detected.
1197   bool verify_bitmaps(const char* caller, HeapRegion* hr);
1198 #endif // PRODUCT
1199 
1200   // If G1VerifyBitmaps is set, verify that the marking bitmaps for
1201   // the given region do not have any spurious marks. If errors are
1202   // detected, print appropriate error messages and crash.
1203   void check_bitmaps(const char* caller, HeapRegion* hr) PRODUCT_RETURN;
1204 
1205   // If G1VerifyBitmaps is set, verify that the marking bitmaps do not
1206   // have any spurious marks. If errors are detected, print
1207   // appropriate error messages and crash.
1208   void check_bitmaps(const char* caller) PRODUCT_RETURN;
1209 
1210   // Do sanity check on the contents of the in-cset fast test table.
1211   bool check_cset_fast_test();
1212 
1213   // verify_region_sets() performs verification over the region
1214   // lists. It is compiled into product code so that it can be used when
1215   // necessary (i.e., during heap verification).
1216   void verify_region_sets();
1217 
1218   // verify_region_sets_optional() is planted in the code for
1219   // list verification in non-product builds (and it can be enabled in
1220   // product builds by defining HEAP_REGION_SET_FORCE_VERIFY to be 1).
1221 #if HEAP_REGION_SET_FORCE_VERIFY
1222   void verify_region_sets_optional() {
1223     verify_region_sets();
1224   }
1225 #else // HEAP_REGION_SET_FORCE_VERIFY
1226   void verify_region_sets_optional() { }
1227 #endif // HEAP_REGION_SET_FORCE_VERIFY
1228 
1229 #ifdef ASSERT
1230   bool is_on_master_free_list(HeapRegion* hr) {
1231     return _hrm.is_free(hr);


1289   void prepend_to_freelist(FreeRegionList* list);
1290   void decrement_summary_bytes(size_t bytes);
1291 
1292   // Returns "TRUE" iff "p" points into the committed areas of the heap.
1293   virtual bool is_in(const void* p) const;
1294 #ifdef ASSERT
1295   // Returns whether p is in one of the available areas of the heap. Slow but
1296   // extensive version.
1297   bool is_in_exact(const void* p) const;
1298 #endif
1299 
1300   // Return "TRUE" iff the given object address is within the collection
1301   // set. Slow implementation.
1302   inline bool obj_in_cs(oop obj);
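
A sketch of what the slow containment check presumably does; addr_to_region()
and in_collection_set() are assumptions about the region API, not shown in
this excerpt:

    // Presumed shape: map the address to its heap region, then ask the region.
    inline bool G1CollectedHeap::obj_in_cs(oop obj) {
      HeapRegion* r = _hrm.addr_to_region((HeapWord*) obj);
      return r != NULL && r->in_collection_set();
    }
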
1303 
1304   inline bool is_in_cset(oop obj);
1305 
1306   inline bool is_in_cset_or_humongous(const oop obj);
1307 
1308  private:
1309   // Instances of this class are used for quick tests on whether a reference points
1310   // into the collection set (and if so, into which generation) or at a humongous object.
1311   //
1312   // Each of the array's elements indicates whether the corresponding region is in
1313   // the collection set and, if so, in which generation, or whether it is a humongous region.
1314   //
1315   // We use this to speed up reference processing during young collection and
1316   // quickly reclaim humongous objects. For the latter, by making a humongous region
1317   // succeed this test, we effectively add it to the collection set. When the reference
1318   // iteration closures then encounter a humongous region, we simply mark it as
1319   // referenced, i.e. live.
1320   class G1InCSetStateFastTestBiasedMappedArray : public G1BiasedMappedArray<in_cset_state_t> {
1321    protected:
1322     in_cset_state_t default_value() const { return InCSetState::NotInCSet; }
1323    public:
1324     void set_humongous(uintptr_t index) {
1325       assert(get_by_index(index) == default_value(), "should be default");
1326       set_by_index(index, InCSetState::humongous());
1327     }
1328 
1329     void clear_humongous(uintptr_t index) {
1330       set_by_index(index, InCSetState::NotInCSet);
1331     }
1332 
1333     void set_in_young(uintptr_t index) {
1334       assert(get_by_index(index) == default_value(), "should be default");
1335       set_by_index(index, InCSetState::Young);
1336     }
1337 
1338     void set_in_old(uintptr_t index) {
1339       assert(get_by_index(index) == default_value(), "should be default");
1340       set_by_index(index, InCSetState::Old);
1341     }
1342 
1343     bool is_in_cset_or_humongous(HeapWord* addr) const { return InCSetState::is_in_cset_or_humongous(at(addr)); }
1344     bool is_in_cset(HeapWord* addr) const { return InCSetState::is_in_cset(at(addr)); }
1345     in_cset_state_t at(HeapWord* addr) const { return (in_cset_state_t) get_by_address(addr); }
1346     void clear() { G1BiasedMappedArray<in_cset_state_t>::clear(); }
1347   };
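
Usage-wise, the array turns the containment question into a single byte load
per reference. A caller-side sketch (the branch bodies are placeholders):

    HeapWord* addr = (HeapWord*) obj;
    if (_in_cset_fast_test.is_in_cset(addr)) {
      // young or old cset region: the object will be evacuated
    } else if (_in_cset_fast_test.is_in_cset_or_humongous(addr)) {
      // humongous region: no copying, just mark it as live
    }
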
1348 
1349   // This array is used for a quick test on whether a reference points into
1350   // the collection set or not. Each of the array's elements denotes whether the
1351   // corresponding region is in the collection set.
1352   G1InCSetStateFastTestBiasedMappedArray _in_cset_fast_test;
1353 
1354  public:
1355 
1356   inline in_cset_state_t in_cset_state(const oop obj);
1357 
1358   // Return "TRUE" iff the given object address is in the reserved
1359   // region of g1.
1360   bool is_in_g1_reserved(const void* p) const {
1361     return _hrm.reserved().contains(p);
1362   }
1363 
1364   // Returns a MemRegion that corresponds to the space that has been
1365   // reserved for the heap
1366   MemRegion g1_reserved() const {
1367     return _hrm.reserved();
1368   }




  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP
  26 #define SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP
  27 
  28 #include "gc_implementation/g1/g1AllocationContext.hpp"
  29 #include "gc_implementation/g1/g1Allocator.hpp"
  30 #include "gc_implementation/g1/concurrentMark.hpp"
  31 #include "gc_implementation/g1/evacuationInfo.hpp"
  32 #include "gc_implementation/g1/g1AllocRegion.hpp"
  33 #include "gc_implementation/g1/g1BiasedArray.hpp"
  34 #include "gc_implementation/g1/g1HRPrinter.hpp"
  35 #include "gc_implementation/g1/g1InCSetState.hpp"
  36 #include "gc_implementation/g1/g1MonitoringSupport.hpp"
  37 #include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
  38 #include "gc_implementation/g1/g1YCTypes.hpp"
  39 #include "gc_implementation/g1/heapRegionManager.hpp"
  40 #include "gc_implementation/g1/heapRegionSet.hpp"
  41 #include "gc_implementation/shared/hSpaceCounters.hpp"
  42 #include "gc_implementation/shared/parGCAllocBuffer.hpp"
  43 #include "memory/barrierSet.hpp"
  44 #include "memory/memRegion.hpp"
  45 #include "memory/sharedHeap.hpp"
  46 #include "utilities/stack.hpp"
  47 
  48 // A "G1CollectedHeap" is an implementation of a Java heap for HotSpot.
  49 // It uses the "Garbage First" heap organization and algorithm, which
  50 // may combine concurrent marking with parallel, incremental compaction of
  51 // heap subsets that will yield large amounts of garbage.
  52 
  53 // Forward declarations
  54 class HeapRegion;
  55 class HRRSCleanupTask;


 531                                          int* gclocker_retry_count_ret);
 532 
 533   // Allocation attempt that should be called during safepoints (e.g.,
 534   // at the end of a successful GC). expect_null_mutator_alloc_region
 535   // specifies whether the mutator alloc region is expected to be NULL
 536   // or not.
 537   HeapWord* attempt_allocation_at_safepoint(size_t word_size,
 538                                             AllocationContext_t context,
 539                                             bool expect_null_mutator_alloc_region);
 540 
 541   // It dirties the cards that cover the block so that the post
 542   // write barrier never queues anything when updating objects on this
 543   // block. It is assumed (and in fact we assert) that the block
 544   // belongs to a young region.
 545   inline void dirty_young_block(HeapWord* start, size_t word_size);
 546 
 547   // Allocate blocks during garbage collection. Will ensure an
 548   // allocation region, either by picking one or expanding the
 549   // heap, and then allocate a block of the given size. The block
 550   // may not be humongous - it must fit into a single heap region.
 551   inline HeapWord* par_allocate_during_gc(in_cset_state_t dest,
 552                                           size_t word_size,
 553                                           AllocationContext_t context);
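
In this new version (the second listing) the switch body is gone from the
header; the definition presumably moved to the corresponding .inline.hpp file
(the destination file is an inference from the inline keyword). Based on the
old version above, it would read:

    inline HeapWord* G1CollectedHeap::par_allocate_during_gc(in_cset_state_t dest,
                                                             size_t word_size,
                                                             AllocationContext_t context) {
      switch (dest) {
        case InCSetState::Young:
          return survivor_attempt_allocation(word_size, context);
        case InCSetState::Old:
          return old_attempt_allocation(word_size, context);
        default:
          assert(false, err_msg("Unknown dest: %d", dest));
          break;
      }
      return NULL; // keep some compilers happy
    }
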
 554   // Ensure that no further allocations can happen in "r", bearing in mind
 555   // that parallel threads might be attempting allocations.
 556   void par_allocate_remaining_space(HeapRegion* r);
 557 
 558   // Allocation attempt during GC for a survivor object / PLAB.
 559   inline HeapWord* survivor_attempt_allocation(size_t word_size,
 560                                                AllocationContext_t context);
 561 
 562   // Allocation attempt during GC for an old object / PLAB.
 563   inline HeapWord* old_attempt_allocation(size_t word_size,
 564                                           AllocationContext_t context);
 565 
 566   // These methods are the "callbacks" from the G1AllocRegion class.
 567 
 568   // For mutator alloc regions.
 569   HeapRegion* new_mutator_alloc_region(size_t word_size, bool force);
 570   void retire_mutator_alloc_region(HeapRegion* alloc_region,
 571                                    size_t allocated_bytes);
 572 
 573   // For GC alloc regions.


 619   void enqueue_discovered_references(uint no_of_gc_workers);
 620 
 621 public:
 622 
 623   G1Allocator* allocator() {
 624     return _allocator;
 625   }
 626 
 627   G1MonitoringSupport* g1mm() {
 628     assert(_g1mm != NULL, "should have been initialized");
 629     return _g1mm;
 630   }
 631 
 632   // Expand the garbage-first heap by at least the given size (in bytes!).
 633   // Returns true if the heap was expanded by the requested amount;
 634   // false otherwise.
 635   // (Rounds up to a HeapRegion boundary.)
 636   bool expand(size_t expand_bytes);
 637 
 638   // Returns the PLAB statistics for a given destination.
 639   inline PLABStats* alloc_buffer_stats(in_cset_state_t dest);
 640 
 641   // Determines PLAB size for a given destination.
 642   inline size_t desired_plab_sz(in_cset_state_t dest);
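
Same pattern as par_allocate_during_gc above: alloc_buffer_stats() and
desired_plab_sz() are now bare inline declarations, their bodies (shown in
full in the old version) presumably moved unchanged to the inline file. For
instance:

    inline PLABStats* G1CollectedHeap::alloc_buffer_stats(in_cset_state_t dest) {
      switch (dest) {
        case InCSetState::Young: return &_survivor_plab_stats;
        case InCSetState::Old:   return &_old_plab_stats;
        default: assert(false, err_msg("unknown dest: %d", dest)); break;
      }
      return NULL; // keep some compilers happy
    }
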
 643 
 644   inline AllocationContextStats& allocation_context_stats();
 645 
 646   // Do anything common to GCs.
 647   virtual void gc_prologue(bool full);
 648   virtual void gc_epilogue(bool full);
 649 
 650   inline void set_humongous_is_live(oop obj);
 651 
 652   bool humongous_is_live(uint region) {
 653     return _humongous_is_live.is_live(region);
 654   }
 655 
 656   // Returns whether the given region (which must be a humongous (start) region)
 657   // is to be considered conservatively live regardless of any other conditions.
 658   bool humongous_region_is_always_live(uint index);
 659   // Register the given region to be part of the collection set.
 660   inline void register_humongous_region_with_in_cset_fast_test(uint index);
 661   // Register regions with humongous objects (actually on the start region) in
 662   // the in_cset_fast_test table.


1148   bool verify_no_bits_over_tams(const char* bitmap_name, CMBitMapRO* bitmap,
1149                                 HeapWord* from, HeapWord* limit);
1150 
1151   // Verify that the prev / next bitmap range [tams,end) for the given
1152   // region has no marks. Return true if all is well, false if errors
1153   // are detected.
1154   bool verify_bitmaps(const char* caller, HeapRegion* hr);
1155 #endif // PRODUCT
1156 
1157   // If G1VerifyBitmaps is set, verify that the marking bitmaps for
1158   // the given region do not have any spurious marks. If errors are
1159   // detected, print appropriate error messages and crash.
1160   void check_bitmaps(const char* caller, HeapRegion* hr) PRODUCT_RETURN;
1161 
1162   // If G1VerifyBitmaps is set, verify that the marking bitmaps do not
1163   // have any spurious marks. If errors are detected, print
1164   // appropriate error messages and crash.
1165   void check_bitmaps(const char* caller) PRODUCT_RETURN;
1166 
1167   // Do sanity check on the contents of the in-cset fast test table.
1168   bool check_cset_fast_test() PRODUCT_RETURN_( return true; );
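
PRODUCT_RETURN_ is HotSpot's stub-body macro from utilities/debug.hpp: in
product builds the method gets the given inline body, in non-product builds
the macro expands to nothing and the trailing ';' leaves a plain declaration
whose checking definition lives in the .cpp file. Roughly:

    #ifdef PRODUCT
    #define PRODUCT_RETURN_(code) { code }  // product: stub, here { return true; }
    #else
    #define PRODUCT_RETURN_(code)           // non-product: expands to nothing;
                                            // the trailing ';' ends a declaration
    #endif
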
1169 
1170   // verify_region_sets() performs verification over the region
1171   // lists. It is compiled into product code so that it can be used when
1172   // necessary (i.e., during heap verification).
1173   void verify_region_sets();
1174 
1175   // verify_region_sets_optional() is planted in the code for
1176   // list verification in non-product builds (and it can be enabled in
1177   // product builds by defining HEAP_REGION_SET_FORCE_VERIFY to be 1).
1178 #if HEAP_REGION_SET_FORCE_VERIFY
1179   void verify_region_sets_optional() {
1180     verify_region_sets();
1181   }
1182 #else // HEAP_REGION_SET_FORCE_VERIFY
1183   void verify_region_sets_optional() { }
1184 #endif // HEAP_REGION_SET_FORCE_VERIFY
1185 
1186 #ifdef ASSERT
1187   bool is_on_master_free_list(HeapRegion* hr) {
1188     return _hrm.is_free(hr);


1246   void prepend_to_freelist(FreeRegionList* list);
1247   void decrement_summary_bytes(size_t bytes);
1248 
1249   // Returns "TRUE" iff "p" points into the committed areas of the heap.
1250   virtual bool is_in(const void* p) const;
1251 #ifdef ASSERT
1252   // Returns whether p is in one of the available areas of the heap. Slow but
1253   // extensive version.
1254   bool is_in_exact(const void* p) const;
1255 #endif
1256 
1257   // Return "TRUE" iff the given object address is within the collection
1258   // set. Slow implementation.
1259   inline bool obj_in_cs(oop obj);
1260 
1261   inline bool is_in_cset(oop obj);
1262 
1263   inline bool is_in_cset_or_humongous(const oop obj);
1264 
1265  private:
1266   // This array is used for a quick test on whether a reference points into
1267   // the collection set or not. Each of the array's elements denotes whether the
1268   // corresponding region is in the collection set.
1269   G1InCSetStateFastTestBiasedMappedArray _in_cset_fast_test;
1270 
1271  public:
1272 
1273   inline in_cset_state_t in_cset_state(const oop obj);
1274 
1275   // Return "TRUE" iff the given object address is in the reserved
1276   // region of g1.
1277   bool is_in_g1_reserved(const void* p) const {
1278     return _hrm.reserved().contains(p);
1279   }
1280 
1281   // Returns a MemRegion that corresponds to the space that has been
1282   // reserved for the heap
1283   MemRegion g1_reserved() const {
1284     return _hrm.reserved();
1285   }

