< prev index next >

src/hotspot/share/gc/g1/g1CollectedHeap.hpp

Print this page
rev 60059 : imported patch 8210462-fix-remaining-mentions-of-im


 516   void abort_refinement();
 517   void verify_after_full_collection();
 518   void print_heap_after_full_collection(G1HeapTransition* heap_transition);
 519 
 520   // Helper method for satisfy_failed_allocation()
 521   HeapWord* satisfy_failed_allocation_helper(size_t word_size,
 522                                              bool do_gc,
 523                                              bool clear_all_soft_refs,
 524                                              bool expect_null_mutator_alloc_region,
 525                                              bool* gc_succeeded);
 526 
 527   // Attempt to expand the heap sufficiently
 528   // to support an allocation of the given "word_size".  If
 529   // successful, perform the allocation and return the address of the
 530   // allocated block, or else "NULL".
 531   HeapWord* expand_and_allocate(size_t word_size);
 532 
 533   // Process any reference objects discovered.
 534   void process_discovered_references(G1ParScanThreadStateSet* per_thread_states);
 535 
 536   // If during an initial mark pause we may install a pending list head which is not
 537   // otherwise reachable, ensure that it is marked in the bitmap for concurrent marking
 538   // to discover.
 539   void make_pending_list_reachable();
 540 
 541   // Merges the information gathered on a per-thread basis for all worker threads
 542   // during GC into global variables.
 543   void merge_per_thread_state_info(G1ParScanThreadStateSet* per_thread_states);
 544 
 545   void verify_numa_regions(const char* desc);
 546 
 547 public:
 548   G1YoungRemSetSamplingThread* sampling_thread() const { return _young_gen_sampling_thread; }
 549 
 550   WorkGang* workers() const { return _workers; }
 551 
 552   // Runs the given AbstractGangTask with the current active workers, returning the
 553   // total time taken.
 554   Tickspan run_task(AbstractGangTask* task);
 555 
 556   G1Allocator* allocator() {


 839 #ifndef PRODUCT
 840   // Support for forcing evacuation failures. Analogous to
 841   // PromotionFailureALot for the other collectors.
 842 
 843   // Records whether G1EvacuationFailureALot should be in effect
 844   // for the current GC
 845   bool _evacuation_failure_alot_for_current_gc;
 846 
 847   // Used to record the GC number for interval checking when
 848   // determining whether G1EvacuationFailureALot is in effect
 849   // for the current GC.
 850   size_t _evacuation_failure_alot_gc_number;
 851 
 852   // Count of the number of evacuations between failures.
 853   volatile size_t _evacuation_failure_alot_count;
 854 
 855   // Set whether G1EvacuationFailureALot should be in effect
 856   // for the current GC (based upon the type of GC and which
 857   // command line flags are set).
 858   inline bool evacuation_failure_alot_for_gc_type(bool for_young_gc,
 859                                                   bool during_initial_mark,
 860                                                   bool mark_or_rebuild_in_progress);
 861 
 862   inline void set_evacuation_failure_alot_for_current_gc();
 863 
 864   // Return true if it's time to cause an evacuation failure.
 865   inline bool evacuation_should_fail();
 866 
 867   // Reset the G1EvacuationFailureALot counters.  Should be called at
 868   // the end of an evacuation pause in which an evacuation failure occurred.
 869   inline void reset_evacuation_should_fail();
 870 #endif // !PRODUCT
 871 
 872   // ("Weak") Reference processing support.
 873   //
 874   // G1 has 2 instances of the reference processor class. One
 875   // (_ref_processor_cm) handles reference object discovery
 876   // and subsequent processing during concurrent marking cycles.
 877   //
 878   // The other (_ref_processor_stw) handles reference object
 879   // discovery and processing during full GCs and incremental


 899   // At the start of full GC we:
 900   //  * Disable discovery by the CM ref processor and
 901   //    empty CM ref processor's discovered lists
 902   //    (without processing any entries).
 903   //  * Verify that the STW ref processor is inactive and its
 904   //    discovered lists are empty.
 905   //  * Temporarily set STW ref processor discovery as single threaded.
 906   //  * Temporarily clear the STW ref processor's _is_alive_non_header
 907   //    field.
 908   //  * Finally enable discovery by the STW ref processor.
 909   //
 910   // The STW ref processor is used to record any discovered
 911   // references during the full GC.
 912   //
 913   // At the end of a full GC we:
 914   //  * Enqueue any reference objects discovered by the STW ref processor
 915   //    that have non-live referents. This has the side-effect of
 916   //    making the STW ref processor inactive by disabling discovery.
 917   //  * Verify that the CM ref processor is still inactive
 918   //    and no references have been placed on its discovered
 919   //    lists (also checked as a precondition during initial marking).
 920 
 921   // The (stw) reference processor...
 922   ReferenceProcessor* _ref_processor_stw;
 923 
 924   // During reference object discovery, the _is_alive_non_header
 925   // closure (if non-null) is applied to the referent object to
 926   // determine whether the referent is live. If so then the
 927   // reference object does not need to be 'discovered' and can
 928   // be treated as a regular oop. This has the benefit of reducing
 929   // the number of 'discovered' reference objects that need to
 930   // be processed.
 931   //
 932   // Instance of the is_alive closure for embedding into the
 933   // STW reference processor as the _is_alive_non_header field.
 934   // Supplying a value for the _is_alive_non_header field is
 935   // optional but doing so prevents unnecessary additions to
 936   // the discovered lists during reference discovery.
 937   G1STWIsAliveClosure _is_alive_closure_stw;
 938 
 939   G1STWSubjectToDiscoveryClosure _is_subject_to_discovery_stw;




 516   void abort_refinement();
 517   void verify_after_full_collection();
 518   void print_heap_after_full_collection(G1HeapTransition* heap_transition);
 519 
 520   // Helper method for satisfy_failed_allocation()
 521   HeapWord* satisfy_failed_allocation_helper(size_t word_size,
 522                                              bool do_gc,
 523                                              bool clear_all_soft_refs,
 524                                              bool expect_null_mutator_alloc_region,
 525                                              bool* gc_succeeded);
 526 
 527   // Attempt to expand the heap sufficiently
 528   // to support an allocation of the given "word_size".  If
 529   // successful, perform the allocation and return the address of the
 530   // allocated block, or else "NULL".
 531   HeapWord* expand_and_allocate(size_t word_size);
 532 
 533   // Process any reference objects discovered.
 534   void process_discovered_references(G1ParScanThreadStateSet* per_thread_states);
 535 
 536   // If during a concurrent start pause we may install a pending list head which is not
 537   // otherwise reachable, ensure that it is marked in the bitmap for concurrent marking
 538   // to discover.
 539   void make_pending_list_reachable();
 540 
 541   // Merges the information gathered on a per-thread basis for all worker threads
 542   // during GC into global variables.
 543   void merge_per_thread_state_info(G1ParScanThreadStateSet* per_thread_states);
 544 
 545   void verify_numa_regions(const char* desc);
 546 
 547 public:
 548   G1YoungRemSetSamplingThread* sampling_thread() const { return _young_gen_sampling_thread; }
 549 
 550   WorkGang* workers() const { return _workers; }
 551 
 552   // Runs the given AbstractGangTask with the current active workers, returning the
 553   // total time taken.
 554   Tickspan run_task(AbstractGangTask* task);
 555 
 556   G1Allocator* allocator() {


 839 #ifndef PRODUCT
 840   // Support for forcing evacuation failures. Analogous to
 841   // PromotionFailureALot for the other collectors.
 842 
 843   // Records whether G1EvacuationFailureALot should be in effect
 844   // for the current GC
 845   bool _evacuation_failure_alot_for_current_gc;
 846 
 847   // Used to record the GC number for interval checking when
 848   // determining whether G1EvacuationFailureALot is in effect
 849   // for the current GC.
 850   size_t _evacuation_failure_alot_gc_number;
 851 
 852   // Count of the number of evacuations between failures.
 853   volatile size_t _evacuation_failure_alot_count;
 854 
 855   // Set whether G1EvacuationFailureALot should be in effect
 856   // for the current GC (based upon the type of GC and which
 857   // command line flags are set).
 858   inline bool evacuation_failure_alot_for_gc_type(bool for_young_gc,
 859                                                   bool during_concurrent_start,
 860                                                   bool mark_or_rebuild_in_progress);
 861 
 862   inline void set_evacuation_failure_alot_for_current_gc();
 863 
 864   // Return true if it's time to cause an evacuation failure.
 865   inline bool evacuation_should_fail();
 866 
 867   // Reset the G1EvacuationFailureALot counters.  Should be called at
 868   // the end of an evacuation pause in which an evacuation failure occurred.
 869   inline void reset_evacuation_should_fail();
 870 #endif // !PRODUCT
 871 
 872   // ("Weak") Reference processing support.
 873   //
 874   // G1 has 2 instances of the reference processor class. One
 875   // (_ref_processor_cm) handles reference object discovery
 876   // and subsequent processing during concurrent marking cycles.
 877   //
 878   // The other (_ref_processor_stw) handles reference object
 879   // discovery and processing during full GCs and incremental


 899   // At the start of full GC we:
 900   //  * Disable discovery by the CM ref processor and
 901   //    empty CM ref processor's discovered lists
 902   //    (without processing any entries).
 903   //  * Verify that the STW ref processor is inactive and its
 904   //    discovered lists are empty.
 905   //  * Temporarily set STW ref processor discovery as single threaded.
 906   //  * Temporarily clear the STW ref processor's _is_alive_non_header
 907   //    field.
 908   //  * Finally enable discovery by the STW ref processor.
 909   //
 910   // The STW ref processor is used to record any discovered
 911   // references during the full GC.
 912   //
 913   // At the end of a full GC we:
 914   //  * Enqueue any reference objects discovered by the STW ref processor
 915   //    that have non-live referents. This has the side-effect of
 916   //    making the STW ref processor inactive by disabling discovery.
 917   //  * Verify that the CM ref processor is still inactive
 918   //    and no references have been placed on its discovered
 919   //    lists (also checked as a precondition during concurrent start).
 920 
 921   // The (stw) reference processor...
 922   ReferenceProcessor* _ref_processor_stw;
 923 
 924   // During reference object discovery, the _is_alive_non_header
 925   // closure (if non-null) is applied to the referent object to
 926   // determine whether the referent is live. If so then the
 927   // reference object does not need to be 'discovered' and can
 928   // be treated as a regular oop. This has the benefit of reducing
 929   // the number of 'discovered' reference objects that need to
 930   // be processed.
 931   //
 932   // Instance of the is_alive closure for embedding into the
 933   // STW reference processor as the _is_alive_non_header field.
 934   // Supplying a value for the _is_alive_non_header field is
 935   // optional but doing so prevents unnecessary additions to
 936   // the discovered lists during reference discovery.
 937   G1STWIsAliveClosure _is_alive_closure_stw;
 938 
 939   G1STWSubjectToDiscoveryClosure _is_subject_to_discovery_stw;


< prev index next >