
src/share/vm/gc/g1/g1CollectedHeap.cpp

rev 10750 : [mq]: 8153503-cleanup-remset-iteration
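Old copy of the changed lines (before this revision):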


  80 
  81 size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0;
  82 
  83 // INVARIANTS/NOTES
  84 //
  85 // All allocation activity covered by the G1CollectedHeap interface is
  86 // serialized by acquiring the HeapLock.  This happens in mem_allocate
  87 // and allocate_new_tlab, which are the "entry" points to the
  88 // allocation code from the rest of the JVM.  (Note that this does not
  89 // apply to TLAB allocation, which is not part of this interface: it
  90 // is done by clients of this interface.)
  91 
  92 // Local to this file.
  93 
  94 class RefineCardTableEntryClosure: public CardTableEntryClosure {
  95   bool _concurrent;
  96 public:
  97   RefineCardTableEntryClosure() : _concurrent(true) { }
  98 
  99   bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
 100     bool oops_into_cset = G1CollectedHeap::heap()->g1_rem_set()->refine_card(card_ptr, worker_i, false);
 101     // This path is executed by the concurrent refine or mutator threads,
 102     // concurrently, and so we do not care if card_ptr contains references
 103     // that point into the collection set.
 104     assert(!oops_into_cset, "should be");
 105 
 106     if (_concurrent && SuspendibleThreadSet::should_yield()) {
 107       // Caller will actually yield.
 108       return false;
 109     }
 110     // Otherwise, we finished successfully; return true.
 111     return true;
 112   }
 113 
 114   void set_concurrent(bool b) { _concurrent = b; }
 115 };
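The do_card_ptr() contract above is easy to miss: returning false asks the caller to yield to the suspendible thread set and retry later, while returning true means the card was fully refined and draining may continue. A minimal standalone sketch of that protocol, assuming nothing from HotSpot (MiniCardClosure, drain_buffer and CountingClosure are hypothetical stand-ins for CardTableEntryClosure, a DirtyCardQueue buffer drain, and a concrete refinement closure):

    #include <cstddef>
    #include <cstdio>

    typedef signed char jbyte;  // stand-in for HotSpot's jbyte

    struct MiniCardClosure {
      // Return true to keep draining, false to request a yield.
      virtual bool do_card_ptr(jbyte* card_ptr, unsigned worker_i) = 0;
      virtual ~MiniCardClosure() {}
    };

    // Drains a buffer of card pointers and stops early when the closure
    // requests a yield, reporting how far it got so the caller can
    // resume from the same index after the safepoint.
    static size_t drain_buffer(jbyte** buf, size_t len,
                               MiniCardClosure* cl, unsigned worker_i) {
      for (size_t i = 0; i < len; i++) {
        if (!cl->do_card_ptr(buf[i], worker_i)) {
          return i;  // caller yields, then resumes here
        }
      }
      return len;    // buffer fully drained
    }

    // Pretends should_yield() fires after a fixed number of cards.
    struct CountingClosure : MiniCardClosure {
      size_t processed;
      size_t yield_after;
      CountingClosure(size_t n) : processed(0), yield_after(n) {}
      virtual bool do_card_ptr(jbyte*, unsigned) {
        processed++;
        return processed < yield_after;  // false = please yield
      }
    };

    int main() {
      jbyte cards[8] = {0};
      jbyte* buf[8];
      for (int i = 0; i < 8; i++) buf[i] = &cards[i];
      CountingClosure cl(3);
      size_t stopped_at = drain_buffer(buf, 8, &cl, 0);
      printf("drained %u of 8 cards before the yield request\n",
             (unsigned) stopped_at);
      return 0;
    }

The point of returning the index is that a suspended worker loses no work: after the safepoint it re-enters the drain loop exactly where the closure interrupted it.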
 116 
 117 
 118 class RedirtyLoggedCardTableEntryClosure : public CardTableEntryClosure {
 119  private:
 120   size_t _num_dirtied;


3060   time = ((double)(os::elapsed_counter() - time) / os::elapsed_frequency()) * 1000.0;
3061   g1_policy()->phase_times()->record_fast_reclaim_humongous_stats(time,
3062                                                                   cl.total_humongous(),
3063                                                                   cl.candidate_humongous());
3064   _has_humongous_reclaim_candidates = cl.candidate_humongous() > 0;
3065 
3066   // Finally flush all remembered set entries to re-check into the global DCQS.
3067   cl.flush_rem_set_entries();
3068 }
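Per the comment above, the flush pushes the remembered set entries the closure collected back into the global DCQS so they are re-checked later. A toy model of that defer-then-flush pattern, not the DCQS API (GlobalQueue and LocalBuffer are hypothetical):

    #include <cstddef>
    #include <vector>

    struct GlobalQueue {
      std::vector<const void*> entries;
      void enqueue(const void* e) { entries.push_back(e); }
    };

    struct LocalBuffer {
      std::vector<const void*> held_back;
      void defer(const void* e) { held_back.push_back(e); }
      // Move everything held back into the global queue for
      // re-examination, leaving the local buffer empty.
      void flush_to(GlobalQueue* q) {
        for (std::size_t i = 0; i < held_back.size(); i++) {
          q->enqueue(held_back[i]);
        }
        held_back.clear();
      }
    };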
3069 
3070 class VerifyRegionRemSetClosure : public HeapRegionClosure {
3071   public:
3072     bool doHeapRegion(HeapRegion* hr) {
3073       if (!hr->is_archive() && !hr->is_continues_humongous()) {
3074         hr->verify_rem_set();
3075       }
3076       return false;
3077     }
3078 };
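Note the HeapRegionClosure return convention used here: doHeapRegion() returns false to continue the iteration and true to abort it early, so a verify-everything closure always returns false. A standalone sketch of that contract (MiniRegion, MiniRegionClosure and iterate_regions are hypothetical, not the HeapRegion API):

    #include <cstdio>

    struct MiniRegion { int index; bool continues_humongous; };

    struct MiniRegionClosure {
      // Returning true aborts the walk; false continues it.
      virtual bool do_region(MiniRegion* r) = 0;
      virtual ~MiniRegionClosure() {}
    };

    static void iterate_regions(MiniRegion* regions, int n,
                                MiniRegionClosure* cl) {
      for (int i = 0; i < n; i++) {
        if (cl->do_region(&regions[i])) {
          return;  // closure asked to stop early
        }
      }
    }

    // Mirrors VerifyRegionRemSetClosure: visit every region, skip the
    // ones that cannot be verified, never abort.
    struct VerifyAllClosure : MiniRegionClosure {
      virtual bool do_region(MiniRegion* r) {
        if (!r->continues_humongous) {
          printf("verifying remset of region %d\n", r->index);
        }
        return false;
      }
    };

    int main() {
      MiniRegion rs[3] = { {0, false}, {1, true}, {2, false} };
      VerifyAllClosure cl;
      iterate_regions(rs, 3, &cl);
      return 0;
    }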
3079 
3080 #ifdef ASSERT
3081 class VerifyCSetClosure: public HeapRegionClosure {
3082 public:
3083   bool doHeapRegion(HeapRegion* hr) {
3084     // Here we check that the CSet region's RSet is ready for parallel
3085     // iteration. The fields that we'll verify are only manipulated
3086     // when the region is part of a CSet and is collected. Afterwards,
3087     // we reset these fields when we clear the region's RSet (when the
3088     // region is freed) so they are ready when the region is
3089     // re-allocated. The only exception to this is if there's an
3090     // evacuation failure and instead of freeing the region we leave
3091     // it in the heap. In that case, we reset these fields during
3092     // evacuation failure handling.
3093     guarantee(hr->rem_set()->verify_ready_for_par_iteration(), "verification");
3094 
3095     // Here's a good place to add any other checks we'd like to
3096     // perform on CSet regions.
3097     return false;
3098   }
3099 };
3100 #endif // ASSERT
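VerifyCSetClosure is compiled only in debug builds; it is applied over the finalized collection set later in the pause, in this ASSERT block from the hunk below:

    #ifdef ASSERT
    VerifyCSetClosure cl;
    collection_set_iterate(&cl);
    #endif // ASSERT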
3101 
3102 uint G1CollectedHeap::num_task_queues() const {
3103   return _task_queues->size();
3104 }
3105 
3106 #if TASKQUEUE_STATS
3107 void G1CollectedHeap::print_taskqueue_stats_hdr(outputStream* const st) {
3108   st->print_raw_cr("GC Task Stats");
3109   st->print_raw("thr "); TaskQueueStats::print_header(1, st); st->cr();
3110   st->print_raw("--- "); TaskQueueStats::print_header(2, st); st->cr();
3111 }
3112 
3113 void G1CollectedHeap::print_taskqueue_stats() const {
3114   if (!log_is_enabled(Trace, gc, task, stats)) {
3115     return;
3116   }
3117   Log(gc, task, stats) log;
3118   ResourceMark rm;
3119   outputStream* st = log.trace_stream();
3120 
3121   print_taskqueue_stats_hdr(st);


3323         // If the remembered sets are not up to date we might miss some
3324         // entries that need to be handled.
3325         g1_rem_set()->cleanupHRRS();
3326 
3327         register_humongous_regions_with_cset();
3328 
3329         assert(_verifier->check_cset_fast_test(), "Inconsistency in the InCSetState table.");
3330 
3331         _cm->note_start_of_gc();
3332         // We call this after finalize_cset() to
3333         // ensure that the CSet has been finalized.
3334         _cm->verify_no_cset_oops();
3335 
3336         if (_hr_printer.is_active()) {
3337           HeapRegion* hr = collection_set()->head();
3338           while (hr != NULL) {
3339             _hr_printer.cset(hr);
3340             hr = hr->next_in_collection_set();
3341           }
3342         }
3343 
3344 #ifdef ASSERT
3345         VerifyCSetClosure cl;
3346         collection_set_iterate(&cl);
3347 #endif // ASSERT
3348 
3349         // Initialize the GC alloc regions.
3350         _allocator->init_gc_alloc_regions(evacuation_info);
3351 
3352         G1ParScanThreadStateSet per_thread_states(this, workers()->active_workers(), collection_set()->young_region_length());
3353         pre_evacuate_collection_set();
3354 
3355         // Actually do the work...
3356         evacuate_collection_set(evacuation_info, &per_thread_states);
3357 
3358         post_evacuate_collection_set(evacuation_info, &per_thread_states);
3359 
3360         const size_t* surviving_young_words = per_thread_states.surviving_young_words();
3361         free_collection_set(collection_set()->head(), evacuation_info, surviving_young_words);
3362 
3363         eagerly_reclaim_humongous_regions();
3364 
3365         collection_set()->clear_head();
3366 
3367         record_obj_copy_mem_stats();
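Taken together, this hunk is the heart of the evacuation pause: verify the finalized CSet, set up the GC alloc regions and per-thread scan state, evacuate, then free the CSet regions and eagerly reclaim humongous candidates. A compressed, hypothetical outline of that ordering, with every function a stub standing in for the real G1 call of a similar name:

    struct PerThreadStates { unsigned n_workers; };

    static void init_gc_alloc_regions()         {}  // survivor/old allocation regions
    static void pre_evacuate(PerThreadStates*)  {}
    static void evacuate(PerThreadStates*)      {}  // copy live objects out of the CSet
    static void post_evacuate(PerThreadStates*) {}  // per-thread cleanup and stats
    static void free_cset()                     {}  // return CSet regions to the free list
    static void reclaim_humongous()             {}  // eager humongous reclaim

    static void do_pause(unsigned n_workers) {
      init_gc_alloc_regions();
      PerThreadStates per_thread = { n_workers };
      pre_evacuate(&per_thread);
      evacuate(&per_thread);
      post_evacuate(&per_thread);
      free_cset();
      reclaim_humongous();
    }

    int main() { do_pause(4); return 0; }

The ordering matters: surviving_young_words is only valid after evacuation completes, which is why free_collection_set() consumes it only after post_evacuate_collection_set() has run.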




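New copy of the changed lines (with 8153503-cleanup-remset-iteration applied):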
  80 
  81 size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0;
  82 
  83 // INVARIANTS/NOTES
  84 //
  85 // All allocation activity covered by the G1CollectedHeap interface is
  86 // serialized by acquiring the HeapLock.  This happens in mem_allocate
  87 // and allocate_new_tlab, which are the "entry" points to the
  88 // allocation code from the rest of the JVM.  (Note that this does not
  89 // apply to TLAB allocation, which is not part of this interface: it
  90 // is done by clients of this interface.)
  91 
  92 // Local to this file.
  93 
  94 class RefineCardTableEntryClosure: public CardTableEntryClosure {
  95   bool _concurrent;
  96 public:
  97   RefineCardTableEntryClosure() : _concurrent(true) { }
  98 
  99   bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
 100     bool oops_into_cset = G1CollectedHeap::heap()->g1_rem_set()->refine_card(card_ptr, worker_i, NULL);
 101     // This path is executed by the concurrent refine or mutator threads,
 102     // concurrently, and so we do not care if card_ptr contains references
 103     // that point into the collection set.
 104     assert(!oops_into_cset, "should be");
 105 
 106     if (_concurrent && SuspendibleThreadSet::should_yield()) {
 107       // Caller will actually yield.
 108       return false;
 109     }
 110     // Otherwise, we finished successfully; return true.
 111     return true;
 112   }
 113 
 114   void set_concurrent(bool b) { _concurrent = b; }
 115 };
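The only change to RefineCardTableEntryClosure in this revision is the third argument of the refine_card() call: the old copy passes the literal false, the new copy passes NULL, which suggests the trailing parameter changed from a boolean flag into a pointer as part of this cleanup. Quoting both lines:

    // old
    bool oops_into_cset = G1CollectedHeap::heap()->g1_rem_set()->refine_card(card_ptr, worker_i, false);
    // new
    bool oops_into_cset = G1CollectedHeap::heap()->g1_rem_set()->refine_card(card_ptr, worker_i, NULL);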
 116 
 117 
 118 class RedirtyLoggedCardTableEntryClosure : public CardTableEntryClosure {
 119  private:
 120   size_t _num_dirtied;


3060   time = ((double)(os::elapsed_counter() - time) / os::elapsed_frequency()) * 1000.0;
3061   g1_policy()->phase_times()->record_fast_reclaim_humongous_stats(time,
3062                                                                   cl.total_humongous(),
3063                                                                   cl.candidate_humongous());
3064   _has_humongous_reclaim_candidates = cl.candidate_humongous() > 0;
3065 
3066   // Finally flush all remembered set entries to re-check into the global DCQS.
3067   cl.flush_rem_set_entries();
3068 }
3069 
3070 class VerifyRegionRemSetClosure : public HeapRegionClosure {
3071   public:
3072     bool doHeapRegion(HeapRegion* hr) {
3073       if (!hr->is_archive() && !hr->is_continues_humongous()) {
3074         hr->verify_rem_set();
3075       }
3076       return false;
3077     }
3078 };
3079 
3080 uint G1CollectedHeap::num_task_queues() const {
3081   return _task_queues->size();
3082 }
3083 
3084 #if TASKQUEUE_STATS
3085 void G1CollectedHeap::print_taskqueue_stats_hdr(outputStream* const st) {
3086   st->print_raw_cr("GC Task Stats");
3087   st->print_raw("thr "); TaskQueueStats::print_header(1, st); st->cr();
3088   st->print_raw("--- "); TaskQueueStats::print_header(2, st); st->cr();
3089 }
3090 
3091 void G1CollectedHeap::print_taskqueue_stats() const {
3092   if (!log_is_enabled(Trace, gc, task, stats)) {
3093     return;
3094   }
3095   Log(gc, task, stats) log;
3096   ResourceMark rm;
3097   outputStream* st = log.trace_stream();
3098 
3099   print_taskqueue_stats_hdr(st);


3301         // If the remembered sets are not up to date we might miss some
3302         // entries that need to be handled.
3303         g1_rem_set()->cleanupHRRS();
3304 
3305         register_humongous_regions_with_cset();
3306 
3307         assert(_verifier->check_cset_fast_test(), "Inconsistency in the InCSetState table.");
3308 
3309         _cm->note_start_of_gc();
3310         // We call this after finalize_cset() to
3311         // ensure that the CSet has been finalized.
3312         _cm->verify_no_cset_oops();
3313 
3314         if (_hr_printer.is_active()) {
3315           HeapRegion* hr = collection_set()->head();
3316           while (hr != NULL) {
3317             _hr_printer.cset(hr);
3318             hr = hr->next_in_collection_set();
3319           }
3320         }
3321 
3322         // Initialize the GC alloc regions.
3323         _allocator->init_gc_alloc_regions(evacuation_info);
3324 
3325         G1ParScanThreadStateSet per_thread_states(this, workers()->active_workers(), collection_set()->young_region_length());
3326         pre_evacuate_collection_set();
3327 
3328         // Actually do the work...
3329         evacuate_collection_set(evacuation_info, &per_thread_states);
3330 
3331         post_evacuate_collection_set(evacuation_info, &per_thread_states);
3332 
3333         const size_t* surviving_young_words = per_thread_states.surviving_young_words();
3334         free_collection_set(collection_set()->head(), evacuation_info, surviving_young_words);
3335 
3336         eagerly_reclaim_humongous_regions();
3337 
3338         collection_set()->clear_head();
3339 
3340         record_obj_copy_mem_stats();

