src/hotspot/share/gc/g1/g1CollectedHeap.hpp
rev 53988 : imported patch 8219100-cleanup-young-collection-prologue

--- old ---

 339 #define assert_heap_not_locked()                                          \
 340   do {                                                                        \
 341     assert(!Heap_lock->owned_by_self(),                                       \
 342         heap_locking_asserts_params("should not be holding the Heap_lock"));  \
 343   } while (0)
 344 
 345 #define assert_heap_not_locked_and_not_at_safepoint()                         \
 346   do {                                                                        \
 347     assert(!Heap_lock->owned_by_self() &&                                     \
 348                                     !SafepointSynchronize::is_at_safepoint(), \
 349       heap_locking_asserts_params("should not be holding the Heap_lock and "  \
 350                                    "should not be at a safepoint"));          \
 351   } while (0)
 352 
 353 #define assert_at_safepoint_on_vm_thread()                                    \
 354   do {                                                                        \
 355     assert_at_safepoint();                                                    \
 356     assert(Thread::current_or_null() != NULL, "no current thread");           \
 357     assert(Thread::current()->is_VM_thread(), "current thread is not VM thread"); \
 358   } while (0)
 359 


 360   // The young region lists.
 361   G1EdenRegions _eden;
 362   G1SurvivorRegions _survivor;
 363 
 364   STWGCTimer* _gc_timer_stw;
 365 
 366   G1NewTracer* _gc_tracer_stw;
 367 
 368   // The current policy object for the collector.
 369   G1Policy* _policy;
 370   G1HeapSizingPolicy* _heap_sizing_policy;
 371 
 372   G1CollectionSet _collection_set;
 373 
 374   // Try to allocate a single non-humongous HeapRegion sufficient for
 375   // an allocation of the given word_size. If do_expand is true,
 376   // attempt to expand the heap if necessary to satisfy the allocation
 377   // request. 'type' specifies the type of region to be allocated. (Use constants
 378   // Old, Eden, Humongous, Survivor defined in HeapRegionType.)
 379   HeapRegion* new_region(size_t word_size, HeapRegionType type, bool do_expand);


 711   // Schedule the VM operation that will do an evacuation pause to
 712   // satisfy an allocation request of word_size. *succeeded will
 713   // return whether the VM operation was successful (it did do an
 714   // evacuation pause) or not (another thread beat us to it or the GC
 715   // locker was active). Given that we should not be holding the
 716   // Heap_lock when we enter this method, we will pass the
 717   // gc_count_before (i.e., total_collections()) as a parameter since
 718   // it has to be read while holding the Heap_lock. Currently, both
 719   // methods that call do_collection_pause() release the Heap_lock
 720   // before the call, so it's easy to read gc_count_before just before.
 721   HeapWord* do_collection_pause(size_t         word_size,
 722                                 uint           gc_count_before,
 723                                 bool*          succeeded,
 724                                 GCCause::Cause gc_cause);
 725 
 726   void wait_for_root_region_scanning();
 727 
 728   // The guts of the incremental collection pause, executed by the VM
 729   // thread. It returns false if it is unable to do the collection due
 730   // to the GC locker being active, true otherwise.
 731   bool do_collection_pause_at_safepoint(double target_pause_time_ms);
 732 
 733   // Actually do the work of evacuating the collection set.
 734   void evacuate_collection_set(G1ParScanThreadStateSet* per_thread_states);
 735   void evacuate_optional_collection_set(G1ParScanThreadStateSet* per_thread_states);
 736   void evacuate_optional_regions(G1ParScanThreadStateSet* per_thread_states, G1OptionalCSet* ocset);
 737 
 738   void pre_evacuate_collection_set();
 739   void post_evacuate_collection_set(G1EvacuationInfo& evacuation_info, G1ParScanThreadStateSet* pss);
 740 

 741   // Update object copying statistics.
 742   void record_obj_copy_mem_stats();
 743 
 744   // The hot card cache for remembered set insertion optimization.
 745   G1HotCardCache* _hot_card_cache;
 746 
 747   // The g1 remembered set of the heap.
 748   G1RemSet* _rem_set;
 749 
 750   // A set of cards covering the objects whose remembered sets should be
 751   // updated concurrently after the collection.
 752   G1DirtyCardQueueSet _dirty_card_queue_set;
 753 
 754   // After a collection pause, convert the regions in the collection set into free
 755   // regions.
 756   void free_collection_set(G1CollectionSet* collection_set, G1EvacuationInfo& evacuation_info, const size_t* surviving_young_words);
 757 
 758   // Abandon the current collection set without recording policy
 759   // statistics or updating free lists.
 760   void abandon_collection_set(G1CollectionSet* collection_set);




+++ new ---

 339 #define assert_heap_not_locked()                                          \
 340   do {                                                                        \
 341     assert(!Heap_lock->owned_by_self(),                                       \
 342         heap_locking_asserts_params("should not be holding the Heap_lock"));  \
 343   } while (0)
 344 
 345 #define assert_heap_not_locked_and_not_at_safepoint()                         \
 346   do {                                                                        \
 347     assert(!Heap_lock->owned_by_self() &&                                     \
 348                                     !SafepointSynchronize::is_at_safepoint(), \
 349       heap_locking_asserts_params("should not be holding the Heap_lock and "  \
 350                                    "should not be at a safepoint"));          \
 351   } while (0)
 352 
 353 #define assert_at_safepoint_on_vm_thread()                                    \
 354   do {                                                                        \
 355     assert_at_safepoint();                                                    \
 356     assert(Thread::current_or_null() != NULL, "no current thread");           \
 357     assert(Thread::current()->is_VM_thread(), "current thread is not VM thread"); \
 358   } while (0)
 359 
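The macros above encode G1's Heap_lock discipline as entry-point assertions. A minimal usage sketch, not part of this file (both methods are hypothetical):

  // Hypothetical mutator allocation path: must run outside a safepoint
  // and must not already hold the Heap_lock.
  HeapWord* G1CollectedHeap::example_mutator_alloc(size_t word_size) {
    assert_heap_not_locked_and_not_at_safepoint();
    // ... allocation work that may later take the Heap_lock ...
    return NULL;
  }

  // Hypothetical pause step: may only run on the VM thread at a safepoint.
  void G1CollectedHeap::example_pause_step() {
    assert_at_safepoint_on_vm_thread();
    // ... work that assumes a stopped world ...
  }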
 360   const char* young_gc_name() const;
 361 
 362   // The young region lists.
 363   G1EdenRegions _eden;
 364   G1SurvivorRegions _survivor;
 365 
 366   STWGCTimer* _gc_timer_stw;
 367 
 368   G1NewTracer* _gc_tracer_stw;
 369 
 370   // The current policy object for the collector.
 371   G1Policy* _policy;
 372   G1HeapSizingPolicy* _heap_sizing_policy;
 373 
 374   G1CollectionSet _collection_set;
 375 
 376   // Try to allocate a single non-humongous HeapRegion sufficient for
 377   // an allocation of the given word_size. If do_expand is true,
 378   // attempt to expand the heap if necessary to satisfy the allocation
 379   // request. 'type' specifies the type of region to be allocated. (Use constants
 380   // Old, Eden, Humongous, Survivor defined in HeapRegionType.)
 381   HeapRegion* new_region(size_t word_size, HeapRegionType type, bool do_expand);
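
A hedged sketch of a call site, using the HeapRegionType constants named in the comment above (the helper itself is hypothetical):

  // Hypothetical helper: allocate a region for an old-generation object,
  // expanding the heap if the free list cannot satisfy the request.
  HeapRegion* G1CollectedHeap::example_new_old_region(size_t word_size) {
    HeapRegion* r = new_region(word_size, HeapRegionType::Old, true /* do_expand */);
    // NULL means neither the free list nor heap expansion produced a region.
    return r;
  }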


 713   // Schedule the VM operation that will do an evacuation pause to
 714   // satisfy an allocation request of word_size. *succeeded will
 715   // return whether the VM operation was successful (it did do an
 716   // evacuation pause) or not (another thread beat us to it or the GC
 717   // locker was active). Given that we should not be holding the
 718   // Heap_lock when we enter this method, we will pass the
 719   // gc_count_before (i.e., total_collections()) as a parameter since
 720   // it has to be read while holding the Heap_lock. Currently, both
 721   // methods that call do_collection_pause() release the Heap_lock
 722   // before the call, so it's easy to read gc_count_before just before.
 723   HeapWord* do_collection_pause(size_t         word_size,
 724                                 uint           gc_count_before,
 725                                 bool*          succeeded,
 726                                 GCCause::Cause gc_cause);
 727 
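The gc_count_before protocol described above, sketched as it would appear inside a G1CollectedHeap allocation slow path (a hedged sketch assuming the standard MutexLocker idiom, with word_size in scope):

  // Read the collection count under the Heap_lock, then release the lock
  // before scheduling the pause, so the VM operation can detect whether
  // another thread's GC already satisfied the request.
  uint gc_count_before;
  {
    MutexLocker ml(Heap_lock);
    gc_count_before = total_collections();
  }  // Heap_lock released before the call, as the comment requires.
  bool succeeded;
  HeapWord* result = do_collection_pause(word_size, gc_count_before,
                                         &succeeded,
                                         GCCause::_g1_inc_collection_pause);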
 728   void wait_for_root_region_scanning();
 729 
 730   // The guts of the incremental collection pause, executed by the VM
 731   // thread. It returns false if it is unable to do the collection due
 732   // to the GC locker being active, true otherwise.
 733   bool do_collection_pause_at_safepoint(double target_pause_time_ms);
 734 
 735   G1HeapVerifier::G1VerifyType young_collection_verify_type() const;
 736   void verify_before_young_collection(G1HeapVerifier::G1VerifyType type);
 737   void verify_after_young_collection(G1HeapVerifier::G1VerifyType type);
 738 
 739   void calculate_collection_set(G1EvacuationInfo& evacuation_info, double target_pause_time_ms);
 740 
 741   // Actually do the work of evacuating the collection set.
 742   void evacuate_collection_set(G1ParScanThreadStateSet* per_thread_states);
 743   void evacuate_optional_collection_set(G1ParScanThreadStateSet* per_thread_states);
 744   void evacuate_optional_regions(G1ParScanThreadStateSet* per_thread_states, G1OptionalCSet* ocset);
 745 
 746   void pre_evacuate_collection_set(G1EvacuationInfo& evacuation_info);
 747   void post_evacuate_collection_set(G1EvacuationInfo& evacuation_info, G1ParScanThreadStateSet* pss);
 748 
 749   void expand_heap_after_young_collection();
 750   // Update object copying statistics.
 751   void record_obj_copy_mem_stats();
 752 
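Taken together, these declarations outline the young-pause skeleton this patch factors out. One plausible ordering, inferred from the names alone (the authoritative sequence lives in g1CollectedHeap.cpp, not shown in this excerpt; evacuation_info, target_pause_time_ms and per_thread_states are assumed in scope):

  G1HeapVerifier::G1VerifyType verify_type = young_collection_verify_type();
  verify_before_young_collection(verify_type);
  calculate_collection_set(evacuation_info, target_pause_time_ms);
  pre_evacuate_collection_set(evacuation_info);
  evacuate_collection_set(per_thread_states);
  evacuate_optional_collection_set(per_thread_states);
  post_evacuate_collection_set(evacuation_info, per_thread_states);
  expand_heap_after_young_collection();
  verify_after_young_collection(verify_type);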
 753   // The hot card cache for remembered set insertion optimization.
 754   G1HotCardCache* _hot_card_cache;
 755 
 756   // The g1 remembered set of the heap.
 757   G1RemSet* _rem_set;
 758 
 759   // A set of cards covering the objects whose remembered sets should be
 760   // updated concurrently after the collection.
 761   G1DirtyCardQueueSet _dirty_card_queue_set;
 762 
 763   // After a collection pause, convert the regions in the collection set into free
 764   // regions.
 765   void free_collection_set(G1CollectionSet* collection_set, G1EvacuationInfo& evacuation_info, const size_t* surviving_young_words);
 766 
 767   // Abandon the current collection set without recording policy
 768   // statistics or updating free lists.
 769   void abandon_collection_set(G1CollectionSet* collection_set);

