src/hotspot/share/gc/g1/g1CollectedHeap.hpp

 335   do {                                                                        \
 336     assert(!Heap_lock->owned_by_self(),                                       \
 337         heap_locking_asserts_params("should not be holding the Heap_lock"));  \
 338   } while (0)
 339 
 340 #define assert_heap_not_locked_and_not_at_safepoint()                         \
 341   do {                                                                        \
 342     assert(!Heap_lock->owned_by_self() &&                                     \
 343                                     !SafepointSynchronize::is_at_safepoint(), \
 344       heap_locking_asserts_params("should not be holding the Heap_lock and "  \
 345                                    "should not be at a safepoint"));          \
 346   } while (0)
 347 
 348 #define assert_at_safepoint_on_vm_thread()                                    \
 349   do {                                                                        \
 350     assert_at_safepoint();                                                    \
 351     assert(Thread::current_or_null() != NULL, "no current thread");           \
 352     assert(Thread::current()->is_VM_thread(), "current thread is not VM thread"); \
 353   } while (0)
 354 
 355   // The young region list.
 356   G1EdenRegions _eden;
 357   G1SurvivorRegions _survivor;
 358 
 359   STWGCTimer* _gc_timer_stw;
 360 
 361   G1NewTracer* _gc_tracer_stw;
 362 
 363   // The current policy object for the collector.
 364   G1Policy* _g1_policy;
 365   G1HeapSizingPolicy* _heap_sizing_policy;
 366 
 367   G1CollectionSet _collection_set;
 368 
 369   // Try to allocate a single non-humongous HeapRegion sufficient for
 370   // an allocation of the given word_size. If do_expand is true,
 371   // attempt to expand the heap if necessary to satisfy the allocation
 372   // request. Set is_old to true if the region is to be used as an old
 373   // region or for a humongous object; otherwise set it to false.
 374   HeapRegion* new_region(size_t word_size, bool is_old, bool do_expand);
 375 


1024 
1025   MemoryUsage get_auxiliary_data_memory_usage() const {
1026     return _hrm.get_auxiliary_data_memory_usage();
1027   }
1028 
1029   // The number of regions that are not completely free.
1030   uint num_used_regions() const { return num_regions() - num_free_regions(); }
1031 
1032 #ifdef ASSERT
1033   bool is_on_master_free_list(HeapRegion* hr) {
1034     return _hrm.is_free(hr);
1035   }
1036 #endif // ASSERT
1037 
1038   inline void old_set_add(HeapRegion* hr);
1039   inline void old_set_remove(HeapRegion* hr);
1040 
1041   inline void archive_set_add(HeapRegion* hr);
1042 
1043   size_t non_young_capacity_bytes() {
1044     return (old_regions_count() + _archive_set.length() + humongous_regions_count()) * HeapRegion::GrainBytes;
1045   }
1046 
1047   // Determine whether the given region is one that we are using as an
1048   // old GC alloc region.
1049   bool is_old_gc_alloc_region(HeapRegion* hr);
1050 
1051   // Perform a collection of the heap; intended for use in implementing
1052   // "System.gc".  This probably implies as full a collection as the
1053   // "CollectedHeap" supports.
1054   virtual void collect(GCCause::Cause cause);
1055 
1056   // True iff an evacuation has failed in the most-recent collection.
1057   bool evacuation_failed() { return _evacuation_failed; }
1058 
1059   void remove_from_old_sets(const uint old_regions_removed, const uint humongous_regions_removed);
1060   void prepend_to_freelist(FreeRegionList* list);
1061   void decrement_summary_bytes(size_t bytes);
1062 
1063   virtual bool is_in(const void* p) const;
1064 #ifdef ASSERT


1199     // ensure that we don't try to allocate a TLAB as
1200     // humongous and that we don't allocate a humongous
1201     // object in a TLAB.
1202     return word_size > _humongous_object_threshold_in_words;
1203   }
1204 
1205   // Returns the humongous threshold for a specific region size
1206   static size_t humongous_threshold_for(size_t region_size) {
1207     return (region_size / 2);
1208   }
1209 
1210   // Returns the number of regions a humongous object of the given word size
1211   // requires.
1212   static size_t humongous_obj_size_in_regions(size_t word_size);
1213 
1214   // Returns the maximum heap capacity.
1215   virtual size_t max_capacity() const;
1216 
1217   virtual jlong millis_since_last_gc();
1218 
1219 
1220   // Convenience function to be used in situations where the heap type can be
1221   // asserted to be this type.
1222   static G1CollectedHeap* heap();
1223 
1224   void set_region_short_lived_locked(HeapRegion* hr);
1225   // add appropriate methods for any other surv rate groups
1226 
1227   const G1SurvivorRegions* survivor() const { return &_survivor; }
1228 
1229   uint eden_regions_count() const { return _eden.length(); }
1230   uint survivor_regions_count() const { return _survivor.length(); }
1231   uint young_regions_count() const { return _eden.length() + _survivor.length(); }
1232   uint old_regions_count() const { return _old_set.length(); }
1233   uint archive_regions_count() const { return _archive_set.length(); }
1234   uint humongous_regions_count() const { return _humongous_set.length(); }
1235 
1236 #ifdef ASSERT
1237   bool check_young_list_empty();
1238 #endif
1239 
1240   // *** Stuff related to concurrent marking.  It's not clear to me that so
1241   // many of these need to be public.
1242 
1243   // The functions below are helper functions that a subclass of
1244   // "CollectedHeap" can use in the implementation of its virtual
1245   // functions.
1246   // This performs a concurrent marking of the live objects in a
1247   // bitmap off to the side.
1248   void do_concurrent_mark();
1249 
1250   bool is_marked_next(oop obj) const;
1251 

--- Updated version of src/hotspot/share/gc/g1/g1CollectedHeap.hpp follows; the original version is shown above. ---

 335   do {                                                                        \
 336     assert(!Heap_lock->owned_by_self(),                                       \
 337         heap_locking_asserts_params("should not be holding the Heap_lock"));  \
 338   } while (0)
 339 
 340 #define assert_heap_not_locked_and_not_at_safepoint()                         \
 341   do {                                                                        \
 342     assert(!Heap_lock->owned_by_self() &&                                     \
 343                                     !SafepointSynchronize::is_at_safepoint(), \
 344       heap_locking_asserts_params("should not be holding the Heap_lock and "  \
 345                                    "should not be at a safepoint"));          \
 346   } while (0)
 347 
 348 #define assert_at_safepoint_on_vm_thread()                                    \
 349   do {                                                                        \
 350     assert_at_safepoint();                                                    \
 351     assert(Thread::current_or_null() != NULL, "no current thread");           \
 352     assert(Thread::current()->is_VM_thread(), "current thread is not VM thread"); \
 353   } while (0)
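
These assert macros wrap multi-statement checks in do { ... } while (0) so that each expands as a single statement. A minimal standalone sketch of the idiom (the heap_locked flag and the macro name are simplified stand-ins for illustration, not HotSpot code):

#include <cassert>

static bool heap_locked = false;  // stand-in for Heap_lock->owned_by_self()

#define assert_heap_not_locked_sketch()                             \
  do {                                                              \
    assert(!heap_locked && "should not be holding the Heap_lock");  \
  } while (0)

int main() {
  // The do/while (0) wrapper keeps the macro safe in an unbraced
  // if/else, where a bare { ... } block would break the else binding.
  if (!heap_locked)
    assert_heap_not_locked_sketch();
  else
    heap_locked = false;
  return 0;
}
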
 354 
 355   // The young region lists.
 356   G1EdenRegions _eden;
 357   G1SurvivorRegions _survivor;
 358 
 359   STWGCTimer* _gc_timer_stw;
 360 
 361   G1NewTracer* _gc_tracer_stw;
 362 
 363   // The current policy object for the collector.
 364   G1Policy* _g1_policy;
 365   G1HeapSizingPolicy* _heap_sizing_policy;
 366 
 367   G1CollectionSet _collection_set;
 368 
 369   // Try to allocate a single non-humongous HeapRegion sufficient for
 370   // an allocation of the given word_size. If do_expand is true,
 371   // attempt to expand the heap if necessary to satisfy the allocation
 372   // request. Set is_old to true if the region is to be used as an old
 373   // region or for a humongous object; otherwise set it to false.
 374   HeapRegion* new_region(size_t word_size, bool is_old, bool do_expand);
 375 
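
The new_region contract described above can be modeled in isolation: hand out a region from the free list when one is available, and grow the pool only when do_expand permits it. A toy sketch under those assumptions (Region and free_list are illustrative placeholders; the real logic lives in g1CollectedHeap.cpp):

#include <vector>

struct Region {};                       // placeholder for HeapRegion

static std::vector<Region*> free_list;  // placeholder for the free region set

static Region* new_region_sketch(bool do_expand) {
  if (!free_list.empty()) {             // fast path: reuse a free region
    Region* r = free_list.back();
    free_list.pop_back();
    return r;
  }
  if (do_expand) {
    return new Region();                // model expanding the heap
  }
  return nullptr;                       // caller must handle allocation failure
}

int main() {
  Region* r = new_region_sketch(/*do_expand=*/true);
  delete r;
  return 0;
}
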


1024 
1025   MemoryUsage get_auxiliary_data_memory_usage() const {
1026     return _hrm.get_auxiliary_data_memory_usage();
1027   }
1028 
1029   // The number of regions that are not completely free.
1030   uint num_used_regions() const { return num_regions() - num_free_regions(); }
1031 
1032 #ifdef ASSERT
1033   bool is_on_master_free_list(HeapRegion* hr) {
1034     return _hrm.is_free(hr);
1035   }
1036 #endif // ASSERT
1037 
1038   inline void old_set_add(HeapRegion* hr);
1039   inline void old_set_remove(HeapRegion* hr);
1040 
1041   inline void archive_set_add(HeapRegion* hr);
1042 
1043   size_t non_young_capacity_bytes() {
1044     return (old_regions_count() + archive_regions_count() + humongous_regions_count()) * HeapRegion::GrainBytes;
1045   }
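
non_young_capacity_bytes() is plain region arithmetic: the combined old, archive, and humongous region counts multiplied by the fixed region size. A standalone worked example, using an assumed 1 MB value for HeapRegion::GrainBytes and illustrative region counts:

#include <cstddef>
#include <cstdio>

int main() {
  const size_t grain_bytes = 1024 * 1024;  // assumed region size (1 MB)
  const unsigned old_regions = 40, archive_regions = 2, humongous_regions = 3;
  const size_t non_young =
      (size_t)(old_regions + archive_regions + humongous_regions) * grain_bytes;
  printf("non-young capacity: %zu bytes\n", non_young);  // 47185920 bytes (45 MB)
  return 0;
}
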
1046 
1047   // Determine whether the given region is one that we are using as an
1048   // old GC alloc region.
1049   bool is_old_gc_alloc_region(HeapRegion* hr);
1050 
1051   // Perform a collection of the heap; intended for use in implementing
1052   // "System.gc".  This probably implies as full a collection as the
1053   // "CollectedHeap" supports.
1054   virtual void collect(GCCause::Cause cause);
1055 
1056   // True iff an evacuation has failed in the most-recent collection.
1057   bool evacuation_failed() { return _evacuation_failed; }
1058 
1059   void remove_from_old_sets(const uint old_regions_removed, const uint humongous_regions_removed);
1060   void prepend_to_freelist(FreeRegionList* list);
1061   void decrement_summary_bytes(size_t bytes);
1062 
1063   virtual bool is_in(const void* p) const;
1064 #ifdef ASSERT


1199     // ensure that we don't try to allocate a TLAB as
1200     // humongous and that we don't allocate a humongous
1201     // object in a TLAB.
1202     return word_size > _humongous_object_threshold_in_words;
1203   }
1204 
1205   // Returns the humongous threshold for a specific region size
1206   static size_t humongous_threshold_for(size_t region_size) {
1207     return (region_size / 2);
1208   }
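
Taken together, the two fragments above encode G1's humongous rule: an object is humongous when its word size exceeds half a region, so humongous_threshold_for is simply region_size / 2. A standalone check of that arithmetic, assuming 8-byte words and an illustrative 1 MB region:

#include <cassert>
#include <cstddef>

static size_t humongous_threshold_for(size_t region_size) {
  return region_size / 2;  // same half-region rule as above
}

int main() {
  const size_t region_words = (1024 * 1024) / 8;  // 1 MB region, 8-byte words (assumed)
  // With 1 MB regions, any object larger than 512 KB is humongous.
  assert(humongous_threshold_for(region_words) == region_words / 2);
  assert(region_words / 2 + 1 > humongous_threshold_for(region_words));
  return 0;
}
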
1209 
1210   // Returns the number of regions a humongous object of the given word size
1211   // requires.
1212   static size_t humongous_obj_size_in_regions(size_t word_size);
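
humongous_obj_size_in_regions is only declared here; given the threshold rule above, it plausibly reduces to a ceiling division of the object size by the region size in words. A hedged sketch of that arithmetic (the rounding formula is an assumption, not the actual definition in g1CollectedHeap.cpp):

#include <cassert>
#include <cstddef>

static size_t obj_size_in_regions_sketch(size_t word_size, size_t grain_words) {
  return (word_size + grain_words - 1) / grain_words;  // round up to whole regions
}

int main() {
  const size_t grain_words = (1024 * 1024) / 8;  // assumed 1 MB region, 8-byte words
  assert(obj_size_in_regions_sketch(grain_words / 2 + 1, grain_words) == 1);
  assert(obj_size_in_regions_sketch(grain_words + 1, grain_words) == 2);
  return 0;
}
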
1213 
1214   // Returns the maximum heap capacity.
1215   virtual size_t max_capacity() const;
1216 
1217   virtual jlong millis_since_last_gc();
1218 

1219   // Convenience function to be used in situations where the heap type can be
1220   // asserted to be this type.
1221   static G1CollectedHeap* heap();
1222 
1223   void set_region_short_lived_locked(HeapRegion* hr);
1224   // add appropriate methods for any other survivor rate groups
1225 
1226   const G1SurvivorRegions* survivor() const { return &_survivor; }
1227 
1228   uint eden_regions_count() const { return _eden.length(); }
1229   uint survivor_regions_count() const { return _survivor.length(); }
1230   uint young_regions_count() const { return eden_regions_count() + survivor_regions_count(); }
1231   uint old_regions_count() const { return _old_set.length(); }
1232   uint archive_regions_count() const { return _archive_set.length(); }
1233   uint humongous_regions_count() const { return _humongous_set.length(); }
1234 
1235 #ifdef ASSERT
1236   bool check_young_list_empty();
1237 #endif
1238 
1239   // *** Stuff related to concurrent marking.  It's not clear to me that so
1240   // many of these need to be public.
1241 
1242   // The functions below are helper functions that a subclass of
1243   // "CollectedHeap" can use in the implementation of its virtual
1244   // functions.
1245   // This performs a concurrent marking of the live objects in a
1246   // bitmap off to the side.
1247   void do_concurrent_mark();
1248 
1249   bool is_marked_next(oop obj) const;
1250 

