
src/share/vm/gc/g1/g1CollectedHeap.hpp

rev 13331 : imported patch webrev.3b
rev 13332 : [mq]: webrev.4


 281   // called at the end of a GC and artificially expands the heap by
 282   // allocating a number of dead regions. This way we can induce very
 283   // frequent marking cycles and stress the cleanup / concurrent
 284   // cleanup code more (as all the regions that will be allocated by
 285   // this method will be found dead by the marking cycle).
 286   void allocate_dummy_regions() PRODUCT_RETURN;
 287 
 288   // Clear RSets after a compaction. It also resets the GC time stamps.
 289   void clear_rsets_post_compaction();
 290 
 291   // If the HR printer is active, dump the state of the regions in the
 292   // heap after a compaction.
 293   void print_hrm_post_compaction();
 294 
 295   // Create a memory mapper for auxiliary data structures of the given size and
 296   // translation factor.
 297   static G1RegionToSpaceMapper* create_aux_memory_mapper(const char* description,
 298                                                          size_t size,
 299                                                          size_t translation_factor);
 300 
 301   static G1Policy* create_g1_policy();
 302 
 303   void trace_heap(GCWhen::Type when, const GCTracer* tracer);
 304 
 305   void process_weak_jni_handles();
 306 
 307   // These are macros so that, if the assert fires, we get the correct
 308   // line number, file, etc.
 309 
 310 #define heap_locking_asserts_params(_extra_message_)                          \
 311   "%s : Heap_lock locked: %s, at safepoint: %s, is VM thread: %s",            \
 312   (_extra_message_),                                                          \
 313   BOOL_TO_STR(Heap_lock->owned_by_self()),                                    \
 314   BOOL_TO_STR(SafepointSynchronize::is_at_safepoint()),                       \
 315   BOOL_TO_STR(Thread::current()->is_VM_thread())
 316 
 317 #define assert_heap_locked()                                                  \
 318   do {                                                                        \
 319     assert(Heap_lock->owned_by_self(),                                        \
 320            heap_locking_asserts_params("should be holding the Heap_lock"));   \
 321   } while (0)


 353 
 354 #define assert_at_safepoint(_should_be_vm_thread_)                            \
 355   do {                                                                        \
 356     assert(SafepointSynchronize::is_at_safepoint() &&                         \
 357               ((_should_be_vm_thread_) == Thread::current()->is_VM_thread()), \
 358            heap_locking_asserts_params("should be at a safepoint"));          \
 359   } while (0)
 360 
 361 #define assert_not_at_safepoint()                                             \
 362   do {                                                                        \
 363     assert(!SafepointSynchronize::is_at_safepoint(),                          \
 364            heap_locking_asserts_params("should not be at a safepoint"));      \
 365   } while (0)
 366 
 367 protected:
 368 
 369   // The young region list.
 370   G1EdenRegions _eden;
 371   G1SurvivorRegions _survivor;
 372 




 373   // The current policy object for the collector.
 374   G1Policy* _g1_policy;
 375   G1HeapSizingPolicy* _heap_sizing_policy;
 376 
 377   G1CollectionSet _collection_set;
 378 
 379   // This is the second level of trying to allocate a new region. If
 380   // new_region() didn't find a region on the free_list, this call will
 381   // check whether there's anything available on the
 382   // secondary_free_list and/or wait for more regions to appear on
 383   // that list, if _free_regions_coming is set.
 384   HeapRegion* new_region_try_secondary_free_list(bool is_old);
 385 
 386   // Try to allocate a single non-humongous HeapRegion sufficient for
 387   // an allocation of the given word_size. If do_expand is true,
 388   // attempt to expand the heap if necessary to satisfy the allocation
 389   // request. If the region is to be used as an old region or for a
 390   // humongous object, set is_old to true; otherwise, set it to false.
 391   HeapRegion* new_region(size_t word_size, bool is_old, bool do_expand);
 392 


 883   //  * Verify that the STW ref processor is inactive and its
 884   //    discovered lists are empty.
 885   //  * Temporarily set STW ref processor discovery as single threaded.
 886   //  * Temporarily clear the STW ref processor's _is_alive_non_header
 887   //    field.
 888   //  * Finally enable discovery by the STW ref processor.
 889   //
 890   // The STW ref processor is used to record any discovered
 891   // references during the full GC.
 892   //
 893   // At the end of a full GC we:
 894   //  * Enqueue any reference objects discovered by the STW ref processor
 895   //    that have non-live referents. This has the side-effect of
 896   //    making the STW ref processor inactive by disabling discovery.
 897   //  * Verify that the CM ref processor is still inactive
 898   //    and no references have been placed on its discovered
 899   //    lists (also checked as a precondition during initial marking).
 900 
 901   // The (STW) reference processor...
 902   ReferenceProcessor* _ref_processor_stw;
 903 
 904   STWGCTimer* _gc_timer_stw;
 905 
 906   G1NewTracer* _gc_tracer_stw;
 907 
 908   // During reference object discovery, the _is_alive_non_header
 909   // closure (if non-null) is applied to the referent object to
 910   // determine whether the referent is live. If so then the
 911   // reference object does not need to be 'discovered' and can
 912   // be treated as a regular oop. This has the benefit of reducing
 913   // the number of 'discovered' reference objects that need to
 914   // be processed.
 915   //
 916   // Instance of the is_alive closure for embedding into the
 917   // STW reference processor as the _is_alive_non_header field.
 918   // Supplying a value for the _is_alive_non_header field is
 919   // optional but doing so prevents unnecessary additions to
 920   // the discovered lists during reference discovery.
 921   G1STWIsAliveClosure _is_alive_closure_stw;
 922 
 923   // The (concurrent marking) reference processor...
 924   ReferenceProcessor* _ref_processor_cm;
 925 
 926   // Instance of the concurrent mark is_alive closure for embedding




 281   // called at the end of a GC and artificially expands the heap by
 282   // allocating a number of dead regions. This way we can induce very
 283   // frequent marking cycles and stress the cleanup / concurrent
 284   // cleanup code more (as all the regions that will be allocated by
 285   // this method will be found dead by the marking cycle).
 286   void allocate_dummy_regions() PRODUCT_RETURN;
 287 
 288   // Clear RSets after a compaction. It also resets the GC time stamps.
 289   void clear_rsets_post_compaction();
 290 
 291   // If the HR printer is active, dump the state of the regions in the
 292   // heap after a compaction.
 293   void print_hrm_post_compaction();
 294 
 295   // Create a memory mapper for auxiliary data structures of the given size and
 296   // translation factor.
 297   static G1RegionToSpaceMapper* create_aux_memory_mapper(const char* description,
 298                                                          size_t size,
 299                                                          size_t translation_factor);
 300 
 301   static G1Policy* create_g1_policy(STWGCTimer* gc_timer);
 302 
 303   void trace_heap(GCWhen::Type when, const GCTracer* tracer);
 304 
 305   void process_weak_jni_handles();
 306 
 307   // These are macros so that, if the assert fires, we get the correct
 308   // line number, file, etc.
 309 
 310 #define heap_locking_asserts_params(_extra_message_)                          \
 311   "%s : Heap_lock locked: %s, at safepoint: %s, is VM thread: %s",            \
 312   (_extra_message_),                                                          \
 313   BOOL_TO_STR(Heap_lock->owned_by_self()),                                    \
 314   BOOL_TO_STR(SafepointSynchronize::is_at_safepoint()),                       \
 315   BOOL_TO_STR(Thread::current()->is_VM_thread())
 316 
 317 #define assert_heap_locked()                                                  \
 318   do {                                                                        \
 319     assert(Heap_lock->owned_by_self(),                                        \
 320            heap_locking_asserts_params("should be holding the Heap_lock"));   \
 321   } while (0)
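
The macros above exist so that a failing assert reports the caller's file and line. A minimal standalone sketch (not HotSpot code; BOOL_TO_STR, the state probes, and my_assert are stand-ins defined here) of why a macro, unlike a helper function, keeps __FILE__ and __LINE__ pointing at the call site:

  #include <cstdio>
  #include <cstdlib>

  #define BOOL_TO_STR(b) ((b) ? "true" : "false")

  // Hypothetical stand-ins for Heap_lock->owned_by_self() and friends.
  static bool heap_lock_owned_by_self() { return false; }
  static bool at_safepoint()            { return true;  }
  static bool is_vm_thread()            { return true;  }

  // Because this is a macro, __FILE__ and __LINE__ expand at the call site,
  // so the report names the caller's location, not a shared helper's.
  #define my_assert(cond, msg)                                                 \
    do {                                                                       \
      if (!(cond)) {                                                           \
        fprintf(stderr, "assert(%s) failed at %s:%d: %s : "                    \
                "Heap_lock locked: %s, at safepoint: %s, is VM thread: %s\n",  \
                #cond, __FILE__, __LINE__, (msg),                              \
                BOOL_TO_STR(heap_lock_owned_by_self()),                        \
                BOOL_TO_STR(at_safepoint()),                                   \
                BOOL_TO_STR(is_vm_thread()));                                  \
        abort();                                                               \
      }                                                                        \
    } while (0)

  void some_allocation_path() {
    // If this fires, the message carries this file and this line number.
    my_assert(heap_lock_owned_by_self(), "should be holding the Heap_lock");
  }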


 353 
 354 #define assert_at_safepoint(_should_be_vm_thread_)                            \
 355   do {                                                                        \
 356     assert(SafepointSynchronize::is_at_safepoint() &&                         \
 357               ((_should_be_vm_thread_) == Thread::current()->is_VM_thread()), \
 358            heap_locking_asserts_params("should be at a safepoint"));          \
 359   } while (0)
 360 
 361 #define assert_not_at_safepoint()                                             \
 362   do {                                                                        \
 363     assert(!SafepointSynchronize::is_at_safepoint(),                          \
 364            heap_locking_asserts_params("should not be at a safepoint"));      \
 365   } while (0)
 366 
 367 protected:
 368 
 369   // The young region list.
 370   G1EdenRegions _eden;
 371   G1SurvivorRegions _survivor;
 372 
 373   STWGCTimer* _gc_timer_stw;
 374 
 375   G1NewTracer* _gc_tracer_stw;
 376 
 377   // The current policy object for the collector.
 378   G1Policy* _g1_policy;
 379   G1HeapSizingPolicy* _heap_sizing_policy;
 380 
 381   G1CollectionSet _collection_set;
 382 
 383   // This is the second level of trying to allocate a new region. If
 384   // new_region() didn't find a region on the free_list, this call will
 385   // check whether there's anything available on the
 386   // secondary_free_list and/or wait for more regions to appear on
 387   // that list, if _free_regions_coming is set.
 388   HeapRegion* new_region_try_secondary_free_list(bool is_old);
 389 
 390   // Try to allocate a single non-humongous HeapRegion sufficient for
 391   // an allocation of the given word_size. If do_expand is true,
 392   // attempt to expand the heap if necessary to satisfy the allocation
 393   // request. If the region is to be used as an old region or for a
 394   // humongous object, set is_old to true; otherwise, set it to false.
 395   HeapRegion* new_region(size_t word_size, bool is_old, bool do_expand);
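
A self-contained sketch of the two-level lookup described in the two comments above: first the primary free list, then the secondary free list, then optional expansion. The deques, take_from helper, and new_region_sketch are illustrative stand-ins, not the G1 implementation (which synchronizes on locks and can wait while _free_regions_coming is set):

  #include <cstddef>
  #include <deque>

  struct HeapRegion { bool is_old; };

  static std::deque<HeapRegion*> free_list;            // primary free list
  static std::deque<HeapRegion*> secondary_free_list;  // refilled by cleanup
  static bool free_regions_coming = false;             // cleanup still running?

  static HeapRegion* take_from(std::deque<HeapRegion*>& list) {
    if (list.empty()) return NULL;
    HeapRegion* hr = list.front();
    list.pop_front();
    return hr;
  }

  HeapRegion* new_region_sketch(size_t word_size, bool is_old, bool do_expand) {
    (void)word_size; (void)is_old;  // used for region selection in the real code

    // First level: the primary free list.
    HeapRegion* hr = take_from(free_list);
    if (hr != NULL) return hr;

    // Second level: the secondary free list. The real code can also block
    // and wait for more regions while _free_regions_coming is set; the
    // sketch only checks once.
    hr = take_from(secondary_free_list);
    if (hr != NULL) return hr;

    // Last resort: expand the heap (when the caller allows it) and retry.
    if (do_expand) {
      // Expansion would append freshly committed regions to the free list.
      return take_from(free_list);
    }
    return NULL;
  }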
 396 


 887   //  * Verify that the STW ref processor is inactive and its
 888   //    discovered lists are empty.
 889   //  * Temporarily set STW ref processor discovery as single threaded.
 890   //  * Temporarily clear the STW ref processor's _is_alive_non_header
 891   //    field.
 892   //  * Finally enable discovery by the STW ref processor.
 893   //
 894   // The STW ref processor is used to record any discovered
 895   // references during the full GC.
 896   //
 897   // At the end of a full GC we:
 898   //  * Enqueue any reference objects discovered by the STW ref processor
 899   //    that have non-live referents. This has the side-effect of
 900   //    making the STW ref processor inactive by disabling discovery.
 901   //  * Verify that the CM ref processor is still inactive
 902   //    and no references have been placed on its discovered
 903   //    lists (also checked as a precondition during initial marking).
 904 
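A standalone sketch of the start-of-full-GC and end-of-full-GC sequence listed above. RefProcessorSketch and full_gc_sketch are simplified stand-ins for illustration, not HotSpot's ReferenceProcessor API:

  #include <cassert>
  #include <cstddef>

  struct RefProcessorSketch {
    bool  discovery_enabled;      // "active" while discovery is enabled
    bool  discovered_lists_empty;
    bool  mt_discovery;           // multi-threaded discovery?
    void* is_alive_non_header;    // optional liveness closure, may be NULL
  };

  void full_gc_sketch(RefProcessorSketch& stw_rp, RefProcessorSketch& cm_rp) {
    // Start of a full GC: STW ref processor must be inactive, lists empty.
    assert(!stw_rp.discovery_enabled && stw_rp.discovered_lists_empty);

    bool  saved_mt    = stw_rp.mt_discovery;
    void* saved_alive = stw_rp.is_alive_non_header;
    stw_rp.mt_discovery        = false;  // temporarily single threaded
    stw_rp.is_alive_non_header = NULL;   // temporarily cleared
    stw_rp.discovery_enabled   = true;   // finally, enable discovery

    // ... mark and compact; the STW ref processor records discovered refs ...

    // End of a full GC: enqueuing the discovered references with non-live
    // referents also disables discovery, making the processor inactive again.
    stw_rp.discovery_enabled      = false;
    stw_rp.discovered_lists_empty = true;
    stw_rp.mt_discovery           = saved_mt;
    stw_rp.is_alive_non_header    = saved_alive;

    // The CM ref processor must have stayed inactive the whole time.
    assert(!cm_rp.discovery_enabled && cm_rp.discovered_lists_empty);
  }
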
 905   // The (STW) reference processor...
 906   ReferenceProcessor* _ref_processor_stw;




 907 
 908   // During reference object discovery, the _is_alive_non_header
 909   // closure (if non-null) is applied to the referent object to
 910   // determine whether the referent is live. If so then the
 911   // reference object does not need to be 'discovered' and can
 912   // be treated as a regular oop. This has the benefit of reducing
 913   // the number of 'discovered' reference objects that need to
 914   // be processed.
 915   //
 916   // Instance of the is_alive closure for embedding into the
 917   // STW reference processor as the _is_alive_non_header field.
 918   // Supplying a value for the _is_alive_non_header field is
 919   // optional but doing so prevents unnecessary additions to
 920   // the discovered lists during reference discovery.
 921   G1STWIsAliveClosure _is_alive_closure_stw;
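
A simplified sketch of the short-circuit described above. The closure interface and should_discover_reference are illustrative only; in HotSpot the decision is made inside the reference processor during discovery:

  #include <cstddef>

  struct oopDesc;
  typedef oopDesc* oop;

  // Stand-in for a boolean object closure: answers "is this object live?".
  struct IsAliveClosureSketch {
    virtual bool do_object_b(oop obj) = 0;
    virtual ~IsAliveClosureSketch() {}
  };

  // Returns true if the reference object must go on a discovered list for
  // later processing, false if it can be treated as a regular oop.
  bool should_discover_reference(oop referent,
                                 IsAliveClosureSketch* is_alive_non_header) {
    if (is_alive_non_header != NULL &&
        is_alive_non_header->do_object_b(referent)) {
      // The referent is already known to be live, so skipping discovery
      // keeps the discovered lists (and later processing work) shorter.
      return false;
    }
    return true;
  }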
 922 
 923   // The (concurrent marking) reference processor...
 924   ReferenceProcessor* _ref_processor_cm;
 925 
 926   // Instance of the concurrent mark is_alive closure for embedding

