src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp

rev 4801 : imported patch code-movement
rev 4802 : imported patch optimize-nmethod-scanning
rev 4803 : imported patch thomas-comments-2


   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"

  26 #include "code/icBuffer.hpp"
  27 #include "gc_implementation/g1/bufferingOopClosure.hpp"
  28 #include "gc_implementation/g1/concurrentG1Refine.hpp"
  29 #include "gc_implementation/g1/concurrentG1RefineThread.hpp"
  30 #include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
  31 #include "gc_implementation/g1/g1AllocRegion.inline.hpp"
  32 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
  33 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
  34 #include "gc_implementation/g1/g1ErgoVerbose.hpp"
  35 #include "gc_implementation/g1/g1EvacFailure.hpp"
  36 #include "gc_implementation/g1/g1GCPhaseTimes.hpp"
  37 #include "gc_implementation/g1/g1Log.hpp"
  38 #include "gc_implementation/g1/g1MarkSweep.hpp"
  39 #include "gc_implementation/g1/g1OopClosures.inline.hpp"
  40 #include "gc_implementation/g1/g1RemSet.inline.hpp"
  41 #include "gc_implementation/g1/heapRegion.inline.hpp"
  42 #include "gc_implementation/g1/heapRegionRemSet.hpp"
  43 #include "gc_implementation/g1/heapRegionSeq.inline.hpp"
  44 #include "gc_implementation/g1/vm_operations_g1.hpp"
  45 #include "gc_implementation/shared/isGCActiveMark.hpp"


1155 
1156   if (!isHumongous(word_size)) {
1157     return _mutator_alloc_region.attempt_allocation_locked(word_size,
1158                                                       false /* bot_updates */);
1159   } else {
1160     HeapWord* result = humongous_obj_allocate(word_size);
1161     if (result != NULL && g1_policy()->need_to_start_conc_mark("STW humongous allocation")) {
1162       g1_policy()->set_initiate_conc_mark_if_possible();
1163     }
1164     return result;
1165   }
1166 
1167   ShouldNotReachHere();
1168 }
1169 
1170 class PostMCRemSetClearClosure: public HeapRegionClosure {
1171   G1CollectedHeap* _g1h;
1172   ModRefBarrierSet* _mr_bs;
1173 public:
1174   PostMCRemSetClearClosure(G1CollectedHeap* g1h, ModRefBarrierSet* mr_bs) :
1175     _g1h(g1h), _mr_bs(mr_bs) { }

1176   bool doHeapRegion(HeapRegion* r) {


1177     if (r->continuesHumongous()) {


1178       return false;
1179     }

1180     _g1h->reset_gc_time_stamps(r);
1181     HeapRegionRemSet* hrrs = r->rem_set();
1182     if (hrrs != NULL) hrrs->clear();
1183     // You might think here that we could clear just the cards
1184     // corresponding to the used region.  But no: if we leave a dirty card
1185     // in a region we might allocate into, then it would prevent that card
1186     // from being enqueued, and cause it to be missed.
1187     // Re: the performance cost: we shouldn't be doing full GC anyway!
1188     _mr_bs->clear(MemRegion(r->bottom(), r->end()));

1189     return false;
1190   }
1191 };
1192 
1193 void G1CollectedHeap::clear_rsets_post_compaction() {
1194   PostMCRemSetClearClosure rs_clear(this, mr_bs());
1195   heap_region_iterate(&rs_clear);
1196 }
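heap_region_iterate() above drives a HeapRegionClosure over every region in the heap, and a doHeapRegion() return value of false means "continue with the next region". A minimal sketch of the same pattern, using a hypothetical closure for illustration only:

    class CountNonEmptyRegionsClosure : public HeapRegionClosure {
      size_t _count;
    public:
      CountNonEmptyRegionsClosure() : _count(0) { }
      bool doHeapRegion(HeapRegion* r) {
        if (!r->is_empty()) {
          _count++;          // tally regions holding any data
        }
        return false;        // false == keep iterating
      }
      size_t count() const { return _count; }
    };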
1197 
1198 class RebuildRSOutOfRegionClosure: public HeapRegionClosure {
1199   G1CollectedHeap*   _g1h;
1200   UpdateRSOopClosure _cl;
1201   int                _worker_i;
1202 public:
1203   RebuildRSOutOfRegionClosure(G1CollectedHeap* g1, int worker_i = 0) :
1204     _cl(g1->g1_rem_set(), worker_i),
1205     _worker_i(worker_i),
1206     _g1h(g1)
1207   { }
1208 


1248         } else {
1249           _hr_printer->post_compaction(hr, G1HRPrinter::StartsHumongous);
1250         }
1251       } else {
1252         assert(hr->continuesHumongous(), "only way to get here");
1253         _hr_printer->post_compaction(hr, G1HRPrinter::ContinuesHumongous);
1254       }
1255     }
1256     return false;
1257   }
1258 
1259   PostCompactionPrinterClosure(G1HRPrinter* hr_printer)
1260     : _hr_printer(hr_printer) { }
1261 };
1262 
1263 void G1CollectedHeap::print_hrs_post_compaction() {
1264   PostCompactionPrinterClosure cl(hr_printer());
1265   heap_region_iterate(&cl);
1266 }
1267 
1268 double G1CollectedHeap::verify(bool guard, const char* msg) {
1269   double verify_time_ms = 0.0;
1270 
1271   if (guard && total_collections() >= VerifyGCStartAt) {
1272     double verify_start = os::elapsedTime();
1273     HandleMark hm;  // Discard invalid handles created during verification
1274     prepare_for_verify();
1275     Universe::verify(VerifyOption_G1UsePrevMarking, msg);
1276     verify_time_ms = (os::elapsedTime() - verify_start) * 1000;
1277   }
1278 
1279   return verify_time_ms;
1280 }
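The guard-and-time idiom in verify() converts os::elapsedTime() deltas, which are in seconds, to milliseconds. Reduced to its core, with a hypothetical operation standing in for the guarded work:

    double start_sec  = os::elapsedTime();           // wall clock, in seconds
    run_verification();                              // hypothetical guarded work
    double elapsed_ms = (os::elapsedTime() - start_sec) * 1000.0;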
1281 
1282 void G1CollectedHeap::verify_before_gc() {
1283   double verify_time_ms = verify(VerifyBeforeGC, " VerifyBeforeGC:");
1284   g1_policy()->phase_times()->record_verify_before_time_ms(verify_time_ms);
1285 }
1286 
1287 void G1CollectedHeap::verify_after_gc() {
1288   double verify_time_ms = verify(VerifyAfterGC, " VerifyAfterGC:");
1289   g1_policy()->phase_times()->record_verify_after_time_ms(verify_time_ms);
1290 }
1291 
1292 bool G1CollectedHeap::do_collection(bool explicit_gc,
1293                                     bool clear_all_soft_refs,
1294                                     size_t word_size) {
1295   assert_at_safepoint(true /* should_be_vm_thread */);
1296 
1297   if (GC_locker::check_active_before_gc()) {
1298     return false;
1299   }
1300 
1301   SvcGCMarker sgcm(SvcGCMarker::FULL);
1302   ResourceMark rm;
1303 
1304   print_heap_before_gc();
1305 
1306   size_t metadata_prev_used = MetaspaceAux::allocated_used_bytes();
1307 
1308   HRSPhaseSetter x(HRSPhaseFullGC);
1309   verify_region_sets_optional();
1310 
1311   const bool do_clear_all_soft_refs = clear_all_soft_refs ||


1476         ParRebuildRSTask rebuild_rs_task(this);
1477         assert(check_heap_region_claim_values(
1478                HeapRegion::InitialClaimValue), "sanity check");
1479         assert(UseDynamicNumberOfGCThreads ||
1480                workers()->active_workers() == workers()->total_workers(),
1481                "Unless dynamic should use total workers");
1482         // Use the most recent number of active workers
1483         assert(workers()->active_workers() > 0,
1484                "Active workers not properly set");
1485         set_par_threads(workers()->active_workers());
1486         workers()->run_task(&rebuild_rs_task);
1487         set_par_threads(0);
1488         assert(check_heap_region_claim_values(
1489                HeapRegion::RebuildRSClaimValue), "sanity check");
1490         reset_heap_region_claim_values();
1491       } else {
1492         RebuildRSOutOfRegionClosure rebuild_rs(this);
1493         heap_region_iterate(&rebuild_rs);
1494       }
1495 



1496       if (true) { // FIXME
1497         MetaspaceGC::compute_new_size();
1498       }
1499 
1500 #ifdef TRACESPINNING
1501       ParallelTaskTerminator::print_termination_counts();
1502 #endif
1503 
1504       // Discard all rset updates
1505       JavaThread::dirty_card_queue_set().abandon_logs();
1506       assert(!G1DeferredRSUpdate
1507              || (G1DeferredRSUpdate &&
1508                 (dirty_card_queue_set().completed_buffers_num() == 0)), "Should not be any");
1509 
1510       _young_list->reset_sampled_info();
1511       // At this point there should be no regions in the
1512       // entire heap tagged as young.
1513       assert(check_young_list_empty(true /* check_heap */),
1514              "young list should be empty at this point");
1515 


3032 bool G1CollectedHeap::is_marked(oop obj, VerifyOption vo) {
3033   switch (vo) {
3034   case VerifyOption_G1UsePrevMarking: return isMarkedPrev(obj);
3035   case VerifyOption_G1UseNextMarking: return isMarkedNext(obj);
3036   case VerifyOption_G1UseMarkWord:    return obj->is_gc_marked();
3037   default:                            ShouldNotReachHere();
3038   }
3039   return false; // keep some compilers happy
3040 }
3041 
3042 const char* G1CollectedHeap::top_at_mark_start_str(VerifyOption vo) {
3043   switch (vo) {
3044   case VerifyOption_G1UsePrevMarking: return "PTAMS";
3045   case VerifyOption_G1UseNextMarking: return "NTAMS";
3046   case VerifyOption_G1UseMarkWord:    return "NONE";
3047   default:                            ShouldNotReachHere();
3048   }
3049   return NULL; // keep some compilers happy
3050 }
3051 


3052 class VerifyLivenessOopClosure: public OopClosure {
3053   G1CollectedHeap* _g1h;
3054   VerifyOption _vo;
3055 public:
3056   VerifyLivenessOopClosure(G1CollectedHeap* g1h, VerifyOption vo):
3057     _g1h(g1h), _vo(vo)
3058   { }
3059   void do_oop(narrowOop *p) { do_oop_work(p); }
3060   void do_oop(      oop *p) { do_oop_work(p); }
3061 
3062   template <class T> void do_oop_work(T *p) {
3063     oop obj = oopDesc::load_decode_heap_oop(p);
3064     guarantee(obj == NULL || !_g1h->is_obj_dead_cond(obj, _vo),
3065               "Dead object referenced by a not dead object");
3066   }
3067 };
3068 
3069 class VerifyObjsInRegionClosure: public ObjectClosure {
3070 private:
3071   G1CollectedHeap* _g1h;


3165           if (r->max_live_bytes() < not_dead_yet_cl.live_bytes()) {
3166             gclog_or_tty->print_cr("["PTR_FORMAT","PTR_FORMAT"] "
3167                                    "max_live_bytes "SIZE_FORMAT" "
3168                                    "< calculated "SIZE_FORMAT,
3169                                    r->bottom(), r->end(),
3170                                    r->max_live_bytes(),
3171                                  not_dead_yet_cl.live_bytes());
3172             _failures = true;
3173           }
3174         } else {
3175           // When vo == UseNextMarking we cannot currently do a sanity
3176           // check on the live bytes as the calculation has not been
3177           // finalized yet.
3178         }
3179       }
3180     }
3181     return false; // stop the region iteration if we hit a failure
3182   }
3183 };
3184 
3185 class YoungRefCounterClosure : public OopClosure {
3186   G1CollectedHeap* _g1h;
3187   int              _count;
3188  public:
3189   YoungRefCounterClosure(G1CollectedHeap* g1h) : _g1h(g1h), _count(0) {}
3190   void do_oop(oop* p)       { if (_g1h->is_in_young(*p)) { _count++; } }
3191   void do_oop(narrowOop* p) { ShouldNotReachHere(); }
3192 
3193   int count() { return _count; }
3194   void reset_count() { _count = 0; }
3195 };
3196 
3197 class VerifyKlassClosure: public KlassClosure {
3198   YoungRefCounterClosure _young_ref_counter_closure;
3199   OopClosure *_oop_closure;
3200  public:
3201   VerifyKlassClosure(G1CollectedHeap* g1h, OopClosure* cl) : _young_ref_counter_closure(g1h), _oop_closure(cl) {}
3202   void do_klass(Klass* k) {
3203     k->oops_do(_oop_closure);
3204 
3205     _young_ref_counter_closure.reset_count();
3206     k->oops_do(&_young_ref_counter_closure);
3207     if (_young_ref_counter_closure.count() > 0) {
3208       guarantee(k->has_modified_oops(), err_msg("Klass %p has young refs but is not dirty.", k));
3209     }
3210   }
3211 };
3212 
3213 // TODO: VerifyRootsClosure extends OopsInGenClosure so that we can
3214 //       pass it as the perm_blk to SharedHeap::process_strong_roots.
3215 //       When process_strong_roots stops calling perm_blk->younger_refs_iterate
3216 //       we can change this closure to extend the simpler OopClosure.
3217 class VerifyRootsClosure: public OopsInGenClosure {
3218 private:
3219   G1CollectedHeap* _g1h;
3220   VerifyOption     _vo;
3221   bool             _failures;
3222 public:
3223   // _vo == UsePrevMarking -> use "prev" marking information,
3224   // _vo == UseNextMarking -> use "next" marking information,
3225   // _vo == UseMarkWord    -> use mark word from object header.
3226   VerifyRootsClosure(VerifyOption vo) :
3227     _g1h(G1CollectedHeap::heap()),
3228     _vo(vo),
3229     _failures(false) { }
3230 
3231   bool failures() { return _failures; }
3232 
3233   template <class T> void do_oop_nv(T* p) {
3234     T heap_oop = oopDesc::load_heap_oop(p);
3235     if (!oopDesc::is_null(heap_oop)) {
3236       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
3237       if (_g1h->is_obj_dead_cond(obj, _vo)) {
3238         gclog_or_tty->print_cr("Root location "PTR_FORMAT" "
3239                               "points to dead obj "PTR_FORMAT, p, (void*) obj);
3240         if (_vo == VerifyOption_G1UseMarkWord) {
3241           gclog_or_tty->print_cr("  Mark word: "PTR_FORMAT, (void*)(obj->mark()));
3242         }
3243         obj->print_on(gclog_or_tty);
3244         _failures = true;
3245       }
3246     }
3247   }
3248 
3249   void do_oop(oop* p)       { do_oop_nv(p); }
3250   void do_oop(narrowOop* p) { do_oop_nv(p); }
3251 };
3252 
3253 // This is the task used for parallel heap verification.
3254 
3255 class G1ParVerifyTask: public AbstractGangTask {
3256 private:
3257   G1CollectedHeap* _g1h;
3258   VerifyOption     _vo;
3259   bool             _failures;
3260 
3261 public:
3262   // _vo == UsePrevMarking -> use "prev" marking information,
3263   // _vo == UseNextMarking -> use "next" marking information,
3264   // _vo == UseMarkWord    -> use mark word from object header.
3265   G1ParVerifyTask(G1CollectedHeap* g1h, VerifyOption vo) :
3266     AbstractGangTask("Parallel verify task"),
3267     _g1h(g1h),
3268     _vo(vo),
3269     _failures(false) { }
3270 
3271   bool failures() {
3272     return _failures;
3273   }
3274 
3275   void work(uint worker_id) {
3276     HandleMark hm;
3277     VerifyRegionClosure blk(true, _vo);
3278     _g1h->heap_region_par_iterate_chunked(&blk, worker_id,
3279                                           _g1h->workers()->active_workers(),
3280                                           HeapRegion::ParVerifyClaimValue);
3281     if (blk.failures()) {
3282       _failures = true;
3283     }
3284   }
3285 };
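heap_region_par_iterate_chunked() hands out regions to the gang workers using per-region claim values, so no region is verified twice. A simplified sketch of what each worker's share boils down to (the real code also chunks and strides; claimHeapRegion() is assumed to be the atomic claiming primitive):

    void par_iterate_sketch(G1CollectedHeap* g1h, HeapRegionClosure* cl, jint claim) {
      for (uint i = 0; i < g1h->n_regions(); i++) {
        HeapRegion* r = g1h->region_at(i);
        // The claim value flips atomically; exactly one worker succeeds
        // per region, so the workers partition the heap without locking.
        if (r->claimHeapRegion(claim)) {
          cl->doHeapRegion(r);
        }
      }
    }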
3286 
3287 void G1CollectedHeap::verify(bool silent) {
3288   verify(silent, VerifyOption_G1UsePrevMarking);
3289 }
3290 
3291 void G1CollectedHeap::verify(bool silent,
3292                              VerifyOption vo) {
3293   if (SafepointSynchronize::is_at_safepoint()) {
3294     if (!silent) { gclog_or_tty->print("Roots "); }
3295     VerifyRootsClosure rootsCl(vo);
3296 
3297     assert(Thread::current()->is_VM_thread(),
3298            "Expected to be executed serially by the VM thread at this point");
3299 
3300     CodeBlobToOopClosure blobsCl(&rootsCl, /*do_marking=*/ false);



3301     VerifyKlassClosure klassCl(this, &rootsCl);
3302 
3303     // We apply the relevant closures to all the oops in the
3304     // system dictionary, the string table and the code cache.
3305     const int so = SO_AllClasses | SO_Strings | SO_CodeCache;
3306 
3307     // Need cleared claim bits for the strong roots processing
3308     ClassLoaderDataGraph::clear_claimed_marks();
3309 
3310     process_strong_roots(true,      // activate StrongRootsScope
3311                          false,     // we set "is scavenging" to false,
3312                                     // so we don't reset the dirty cards.
3313                          ScanningOption(so),  // roots scanning options
3314                          &rootsCl,
3315                          &blobsCl,
3316                          &klassCl
3317                          );
3318 
3319     bool failures = rootsCl.failures();
3320 
3321     if (vo != VerifyOption_G1UseMarkWord) {
3322       // If we're verifying during a full GC then the region sets
3323       // will have been torn down at the start of the GC. Therefore
3324       // verifying the region sets will fail. So we only verify
3325       // the region sets when not in a full GC.
3326       if (!silent) { gclog_or_tty->print("HeapRegionSets "); }
3327       verify_region_sets();
3328     }
3329 
3330     if (!silent) { gclog_or_tty->print("HeapRegions "); }
3331     if (GCParallelVerificationEnabled && ParallelGCThreads > 1) {
3332       assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue),
3333              "sanity check");
3334 
3335       G1ParVerifyTask task(this, vo);
3336       assert(UseDynamicNumberOfGCThreads ||
3337         workers()->active_workers() == workers()->total_workers(),
3338         "If not dynamic should be using all the workers");
3339       int n_workers = workers()->active_workers();


3368       // It helps to have the per-region information in the output to
3369       // help us track down what went wrong. This is why we call
3370       // print_extended_on() instead of print_on().
3371       print_extended_on(gclog_or_tty);
3372       gclog_or_tty->print_cr("");
3373 #ifndef PRODUCT
3374       if (VerifyDuringGC && G1VerifyDuringGCPrintReachable) {
3375         concurrent_mark()->print_reachable("at-verification-failure",
3376                                            vo, false /* all */);
3377       }
3378 #endif
3379       gclog_or_tty->flush();
3380     }
3381     guarantee(!failures, "there should not have been any failures");
3382   } else {
3383     if (!silent)
3384       gclog_or_tty->print("(SKIPPING roots, heapRegionSets, heapRegions, remset) ");
3385   }
3386 }
3387 


3388 class PrintRegionClosure: public HeapRegionClosure {
3389   outputStream* _st;
3390 public:
3391   PrintRegionClosure(outputStream* st) : _st(st) {}
3392   bool doHeapRegion(HeapRegion* r) {
3393     r->print_on(_st);
3394     return false;
3395   }
3396 };
3397 
3398 void G1CollectedHeap::print_on(outputStream* st) const {
3399   st->print(" %-20s", "garbage-first heap");
3400   st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K",
3401             capacity()/K, used_unlocked()/K);
3402   st->print(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")",
3403             _g1_storage.low_boundary(),
3404             _g1_storage.high(),
3405             _g1_storage.high_boundary());
3406   st->cr();
3407   st->print("  region size " SIZE_FORMAT "K, ", HeapRegion::GrainBytes / K);
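For reference, the format strings above produce a summary shaped like the following (values illustrative, addresses elided):

     garbage-first heap   total 2097152K, used 517120K [0x..., 0x..., 0x...)
      region size 1024K, ...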


3780 
3781     int active_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
3782                                 workers()->active_workers() : 1);
3783     double pause_start_sec = os::elapsedTime();
3784     g1_policy()->phase_times()->note_gc_start(active_workers);
3785     log_gc_header();
3786 
3787     TraceCollectorStats tcs(g1mm()->incremental_collection_counters());
3788     TraceMemoryManagerStats tms(false /* fullGC */, gc_cause());
3789 
3790     // If the secondary_free_list is not empty, append it to the
3791     // free_list. No need to wait for the cleanup operation to finish;
3792     // the region allocation code will check the secondary_free_list
3793     // and wait if necessary. If the G1StressConcRegionFreeing flag is
3794     // set, skip this step so that the region allocation code has to
3795     // get entries from the secondary_free_list.
3796     if (!G1StressConcRegionFreeing) {
3797       append_secondary_free_list_if_not_empty_with_lock();
3798     }
3799 
3800     assert(check_young_list_well_formed(),
3801       "young list should be well formed");

3802 
3803     // Don't dynamically change the number of GC threads this early.  A value of
3804     // 0 is used to indicate serial work.  When parallel work is done,
3805     // it will be set.
3806 
3807     { // Call to jvmpi::post_class_unload_events must occur outside of active GC
3808       IsGCActiveMark x;
3809 
3810       gc_prologue(false);
3811       increment_total_collections(false /* full gc */);
3812       increment_gc_time_stamp();
3813 
3814       verify_before_gc();
3815 
3816       COMPILER2_PRESENT(DerivedPointerTable::clear());
3817 
3818       // Please see comment in g1CollectedHeap.hpp and
3819       // G1CollectedHeap::ref_processing_init() to see how
3820       // reference processing currently works in G1.
3821 


4885       G1ParScanMetadataClosure       only_scan_metadata_cl(_g1h, &pss, rp);
4886 
4887       G1ParScanAndMarkExtRootClosure scan_mark_root_cl(_g1h, &pss, rp);
4888       G1ParScanAndMarkMetadataClosure scan_mark_metadata_cl(_g1h, &pss, rp);
4889 
4890       bool only_young                 = _g1h->g1_policy()->gcs_are_young();
4891       G1KlassScanClosure              scan_mark_klasses_cl_s(&scan_mark_metadata_cl, false);
4892       G1KlassScanClosure              only_scan_klasses_cl_s(&only_scan_metadata_cl, only_young);
4893 
4894       OopClosure*                    scan_root_cl = &only_scan_root_cl;
4895       G1KlassScanClosure*            scan_klasses_cl = &only_scan_klasses_cl_s;
4896 
4897       if (_g1h->g1_policy()->during_initial_mark_pause()) {
4898         // We also need to mark copied objects.
4899         scan_root_cl = &scan_mark_root_cl;
4900         scan_klasses_cl = &scan_mark_klasses_cl_s;
4901       }
4902 
4903       G1ParPushHeapRSClosure          push_heap_rs_cl(_g1h, &pss);
4904 
4905       int so = SharedHeap::SO_AllClasses | SharedHeap::SO_Strings | SharedHeap::SO_CodeCache;




4906 
4907       pss.start_strong_roots();
4908       _g1h->g1_process_strong_roots(/* is scavenging */ true,
4909                                     SharedHeap::ScanningOption(so),
4910                                     scan_root_cl,
4911                                     &push_heap_rs_cl,
4912                                     scan_klasses_cl,
4913                                     worker_id);
4914       pss.end_strong_roots();
4915 
4916       {
4917         double start = os::elapsedTime();
4918         G1ParEvacuateFollowersClosure evac(_g1h, &pss, _queues, &_terminator);
4919         evac.do_void();
4920         double elapsed_ms = (os::elapsedTime()-start)*1000.0;
4921         double term_ms = pss.term_time()*1000.0;
4922         _g1h->g1_policy()->phase_times()->add_obj_copy_time(worker_id, elapsed_ms-term_ms);
4923         _g1h->g1_policy()->phase_times()->record_termination(worker_id, term_ms, pss.term_attempts());
4924       }
4925       _g1h->g1_policy()->record_thread_age_table(pss.age_table());


4927 
4928       if (ParallelGCVerbose) {
4929         MutexLocker x(stats_lock());
4930         pss.print_termination_stats(worker_id);
4931       }
4932 
4933       assert(pss.refs()->is_empty(), "should be empty");
4934 
4935       // Close the inner scope so that the ResourceMark and HandleMark
4936       // destructors are executed here and are included as part of the
4937       // "GC Worker Time".
4938     }
4939 
4940     double end_time_ms = os::elapsedTime() * 1000.0;
4941     _g1h->g1_policy()->phase_times()->record_gc_worker_end_time(worker_id, end_time_ms);
4942   }
4943 };
4944 
4945 // *** Common G1 Evacuation Stuff
4946 
4947 // Closures that support the filtering of CodeBlobs scanned during
4948 // external root scanning.
4949 
4950 // Closure applied to reference fields in code blobs (specifically nmethods)
4951 // to determine whether an nmethod contains references that point into
4952 // the collection set. Used as a predicate when walking code roots so
4953 // that only nmethods that point into the collection set are added to the
4954 // 'marked' list.
4955 
4956 class G1FilteredCodeBlobToOopClosure : public CodeBlobToOopClosure {
4957 
4958   class G1PointsIntoCSOopClosure : public OopClosure {
4959     G1CollectedHeap* _g1;
4960     bool _points_into_cs;
4961   public:
4962     G1PointsIntoCSOopClosure(G1CollectedHeap* g1) :
4963       _g1(g1), _points_into_cs(false) { }
4964 
4965     bool points_into_cs() const { return _points_into_cs; }
4966 
4967     template <class T>
4968     void do_oop_nv(T* p) {
4969       if (!_points_into_cs) {
4970         T heap_oop = oopDesc::load_heap_oop(p);
4971         if (!oopDesc::is_null(heap_oop) &&
4972             _g1->in_cset_fast_test(oopDesc::decode_heap_oop_not_null(heap_oop))) {
4973           _points_into_cs = true;
4974         }
4975       }
4976     }
4977 
4978     virtual void do_oop(oop* p)        { do_oop_nv(p); }
4979     virtual void do_oop(narrowOop* p)  { do_oop_nv(p); }
4980   };
4981 
4982   G1CollectedHeap* _g1;
4983 
4984 public:
4985   G1FilteredCodeBlobToOopClosure(G1CollectedHeap* g1, OopClosure* cl) :
4986     CodeBlobToOopClosure(cl, true), _g1(g1) { }
4987 
4988   virtual void do_code_blob(CodeBlob* cb) {
4989     nmethod* nm = cb->as_nmethod_or_null();
4990     if (nm != NULL && !(nm->test_oops_do_mark())) {
4991       G1PointsIntoCSOopClosure predicate_cl(_g1);
4992       nm->oops_do(&predicate_cl);
4993 
4994       if (predicate_cl.points_into_cs()) {
4995         // At least one of the reference fields or the oop relocations
4996         // in the nmethod points into the collection set. We have to
4997         // 'mark' this nmethod.
4998         // Note: Revisit the following if CodeBlobToOopClosure::do_code_blob()
4999         // or MarkingCodeBlobClosure::do_code_blob() change.
5000         if (!nm->test_set_oops_do_mark()) {
5001           do_newly_marked_nmethod(nm);
5002         }
5003       }
5004     }
5005   }
5006 };
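G1FilteredCodeBlobToOopClosure is an instance of a general predicate-closure idiom: run an OopClosure over an nmethod's oops purely to compute a yes/no answer, then act on the result. The same idiom in isolation, as a hypothetical closure built from the oop-access helpers already used above:

    class NMethodPointsToObjClosure : public OopClosure {
      oop  _target;
      bool _found;
    public:
      NMethodPointsToObjClosure(oop target) : _target(target), _found(false) { }

      bool found() const { return _found; }

      template <class T> void do_oop_nv(T* p) {
        T heap_oop = oopDesc::load_heap_oop(p);
        if (!_found &&
            !oopDesc::is_null(heap_oop) &&
            oopDesc::decode_heap_oop_not_null(heap_oop) == _target) {
          _found = true;   // record the answer; later calls stay cheap
        }
      }

      virtual void do_oop(oop* p)       { do_oop_nv(p); }
      virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
    };

    // Usage: nm->oops_do(&cl); if (cl.found()) { /* act on the answer */ }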
5007 
5008 // This method is run in a GC worker.
5009 
5010 void
5011 G1CollectedHeap::
5012 g1_process_strong_roots(bool is_scavenging,
5013                         ScanningOption so,
5014                         OopClosure* scan_non_heap_roots,
5015                         OopsInHeapRegionClosure* scan_rs,
5016                         G1KlassScanClosure* scan_klasses,
5017                         int worker_i) {
5018 
5019   // First scan the strong roots
5020   double ext_roots_start = os::elapsedTime();
5021   double closure_app_time_sec = 0.0;
5022 
5023   BufferingOopClosure buf_scan_non_heap_roots(scan_non_heap_roots);
5024 
5025   // Walk the code cache w/o buffering, because StarTask cannot handle
5026   // unaligned oop locations.
5027   G1FilteredCodeBlobToOopClosure eager_scan_code_roots(this, scan_non_heap_roots);

5028 
5029   process_strong_roots(false, // no scoping; this is parallel code
5030                        is_scavenging, so,
5031                        &buf_scan_non_heap_roots,
5032                        &eager_scan_code_roots,
5033                        scan_klasses
5034                        );
5035 
5036   // Now the CM ref_processor roots.
5037   if (!_process_strong_tasks->is_task_claimed(G1H_PS_refProcessor_oops_do)) {
5038     // We need to treat the discovered reference lists of the
5039     // concurrent mark ref processor as roots and keep entries
5040     // (which are added by the marking threads) on them live
5041     // until they can be processed at the end of marking.
5042     ref_processor_cm()->weak_oops_do(&buf_scan_non_heap_roots);
5043   }
5044 
5045   // Finish up any enqueued closure apps (attributed as object copy time).
5046   buf_scan_non_heap_roots.done();
5047 


5052   double ext_root_time_ms =
5053     ((os::elapsedTime() - ext_roots_start) - obj_copy_time_sec) * 1000.0;
5054 
5055   g1_policy()->phase_times()->record_ext_root_scan_time(worker_i, ext_root_time_ms);
5056 
5057   // During conc marking we have to filter the per-thread SATB buffers
5058   // to make sure we remove any oops into the CSet (which will show up
5059   // as implicitly live).
5060   double satb_filtering_ms = 0.0;
5061   if (!_process_strong_tasks->is_task_claimed(G1H_PS_filter_satb_buffers)) {
5062     if (mark_in_progress()) {
5063       double satb_filter_start = os::elapsedTime();
5064 
5065       JavaThread::satb_mark_queue_set().filter_thread_buffers();
5066 
5067       satb_filtering_ms = (os::elapsedTime() - satb_filter_start) * 1000.0;
5068     }
5069   }
5070   g1_policy()->phase_times()->record_satb_filtering_time(worker_i, satb_filtering_ms);
5071 


5072   // Now scan the complement of the collection set.
5073   if (scan_rs != NULL) {
5074     g1_rem_set()->oops_into_collection_set_do(scan_rs, worker_i);
5075   }
5076   _process_strong_tasks->all_tasks_completed();
5077 }
5078 
5079 void
5080 G1CollectedHeap::g1_process_weak_roots(OopClosure* root_closure) {
5081   CodeBlobToOopClosure roots_in_blobs(root_closure, /*do_marking=*/ false);
5082   SharedHeap::process_weak_roots(root_closure, &roots_in_blobs);
5083 }
5084 
5085 // Weak Reference Processing support
5086 
5087 // An always "is_alive" closure that is used to preserve referents.
5088 // If the object is non-null then it's alive.  Used in the preservation
5089 // of referent objects that are pointed to by reference objects
5090 // discovered by the CM ref processor.
5091 class G1AlwaysAliveClosure: public BoolObjectClosure {
5092   G1CollectedHeap* _g1;
5093 public:
5094   G1AlwaysAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}


5665     // reported parallel time.
5666   }
5667 
5668   double par_time_ms = (end_par_time_sec - start_par_time_sec) * 1000.0;
5669   g1_policy()->phase_times()->record_par_time(par_time_ms);
5670 
5671   double code_root_fixup_time_ms =
5672         (os::elapsedTime() - end_par_time_sec) * 1000.0;
5673   g1_policy()->phase_times()->record_code_root_fixup_time(code_root_fixup_time_ms);
5674 
5675   set_par_threads(0);
5676 
5677   // Process any discovered reference objects - we have
5678   // to do this _before_ we retire the GC alloc regions
5679   // as we may have to copy some 'reachable' referent
5680   // objects (and their reachable sub-graphs) that were
5681   // not copied during the pause.
5682   process_discovered_references(n_workers);
5683 
5684   // Weak root processing.
5685   // Note: when JSR 292 is enabled and code blobs can contain
5686   // non-perm oops then we will need to process the code blobs
5687   // here too.
5688   {
5689     G1STWIsAliveClosure is_alive(this);
5690     G1KeepAliveClosure keep_alive(this);
5691     JNIHandles::weak_oops_do(&is_alive, &keep_alive);
5692   }
5693 
5694   release_gc_alloc_regions(n_workers);
5695   g1_rem_set()->cleanup_after_oops_into_collection_set_do();
5696 
5697   // Reset and re-enable the hot card cache.
5698   // Note the counts for the cards in the regions in the
5699   // collection set are reset when the collection set is freed.
5700   hot_card_cache->reset_hot_cache();
5701   hot_card_cache->set_use_cache(true);
5702 


5703   finalize_for_evac_failure();
5704 
5705   if (evacuation_failed()) {
5706     remove_self_forwarding_pointers();
5707 
5708     // Reset the G1EvacuationFailureALot counters and flags
5709     // Note: the values are reset only when an actual
5710     // evacuation failure occurs.
5711     NOT_PRODUCT(reset_evacuation_should_fail();)
5712   }
5713 
5714   // Enqueue any remaining references remaining on the STW
5715   // reference processor's discovered lists. We need to do
5716   // this after the card table is cleaned (and verified) as
5717   // the act of enqueuing entries on to the pending list
5718   // will log these updates (and dirty their associated
5719   // cards). We need these updates logged to update any
5720   // RSets.
5721   enqueue_discovered_references(n_workers);
5722 


6477   }
6478 
6479   // Make sure we append the secondary_free_list on the free_list so
6480   // that all free regions we will come across can be safely
6481   // attributed to the free_list.
6482   append_secondary_free_list_if_not_empty_with_lock();
6483 
6484   // Finally, make sure that the region accounting in the lists is
6485   // consistent with what we see in the heap.
6486   _old_set.verify_start();
6487   _humongous_set.verify_start();
6488   _free_list.verify_start();
6489 
6490   VerifyRegionListsClosure cl(&_old_set, &_humongous_set, &_free_list);
6491   heap_region_iterate(&cl);
6492 
6493   _old_set.verify_end();
6494   _humongous_set.verify_end();
6495   _free_list.verify_end();
6496 }


(Below: the same file as it reads after the imported patches above are applied.)

   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "code/codeCache.hpp"
  27 #include "code/icBuffer.hpp"
  28 #include "gc_implementation/g1/bufferingOopClosure.hpp"
  29 #include "gc_implementation/g1/concurrentG1Refine.hpp"
  30 #include "gc_implementation/g1/concurrentG1RefineThread.hpp"
  31 #include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
  32 #include "gc_implementation/g1/g1AllocRegion.inline.hpp"
  33 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
  34 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
  35 #include "gc_implementation/g1/g1ErgoVerbose.hpp"
  36 #include "gc_implementation/g1/g1EvacFailure.hpp"
  37 #include "gc_implementation/g1/g1GCPhaseTimes.hpp"
  38 #include "gc_implementation/g1/g1Log.hpp"
  39 #include "gc_implementation/g1/g1MarkSweep.hpp"
  40 #include "gc_implementation/g1/g1OopClosures.inline.hpp"
  41 #include "gc_implementation/g1/g1RemSet.inline.hpp"
  42 #include "gc_implementation/g1/heapRegion.inline.hpp"
  43 #include "gc_implementation/g1/heapRegionRemSet.hpp"
  44 #include "gc_implementation/g1/heapRegionSeq.inline.hpp"
  45 #include "gc_implementation/g1/vm_operations_g1.hpp"
  46 #include "gc_implementation/shared/isGCActiveMark.hpp"


1156 
1157   if (!isHumongous(word_size)) {
1158     return _mutator_alloc_region.attempt_allocation_locked(word_size,
1159                                                       false /* bot_updates */);
1160   } else {
1161     HeapWord* result = humongous_obj_allocate(word_size);
1162     if (result != NULL && g1_policy()->need_to_start_conc_mark("STW humongous allocation")) {
1163       g1_policy()->set_initiate_conc_mark_if_possible();
1164     }
1165     return result;
1166   }
1167 
1168   ShouldNotReachHere();
1169 }
1170 
1171 class PostMCRemSetClearClosure: public HeapRegionClosure {
1172   G1CollectedHeap* _g1h;
1173   ModRefBarrierSet* _mr_bs;
1174 public:
1175   PostMCRemSetClearClosure(G1CollectedHeap* g1h, ModRefBarrierSet* mr_bs) :
1176     _g1h(g1h), _mr_bs(mr_bs) {}
1177 
1178   bool doHeapRegion(HeapRegion* r) {
1179     HeapRegionRemSet* hrrs = r->rem_set();
1180 
1181     if (r->continuesHumongous()) {
1182       // We'll assert that the strong code root list is empty
1183       assert(hrrs->strong_code_roots_list_length() == 0, "sanity");
1184       return false;
1185     }
1186 
1187     _g1h->reset_gc_time_stamps(r);
1188     hrrs->clear();

1189     // You might think here that we could clear just the cards
1190     // corresponding to the used region.  But no: if we leave a dirty card
1191     // in a region we might allocate into, then it would prevent that card
1192     // from being enqueued, and cause it to be missed.
1193     // Re: the performance cost: we shouldn't be doing full GC anyway!
1194     _mr_bs->clear(MemRegion(r->bottom(), r->end()));
1195 
1196     return false;
1197   }
1198 };
1199 
1200 void G1CollectedHeap::clear_rsets_post_compaction() {
1201   PostMCRemSetClearClosure rs_clear(this, mr_bs());
1202   heap_region_iterate(&rs_clear);
1203 }
1204 
1205 class RebuildRSOutOfRegionClosure: public HeapRegionClosure {
1206   G1CollectedHeap*   _g1h;
1207   UpdateRSOopClosure _cl;
1208   int                _worker_i;
1209 public:
1210   RebuildRSOutOfRegionClosure(G1CollectedHeap* g1, int worker_i = 0) :
1211     _cl(g1->g1_rem_set(), worker_i),
1212     _worker_i(worker_i),
1213     _g1h(g1)
1214   { }
1215 


1255         } else {
1256           _hr_printer->post_compaction(hr, G1HRPrinter::StartsHumongous);
1257         }
1258       } else {
1259         assert(hr->continuesHumongous(), "only way to get here");
1260         _hr_printer->post_compaction(hr, G1HRPrinter::ContinuesHumongous);
1261       }
1262     }
1263     return false;
1264   }
1265 
1266   PostCompactionPrinterClosure(G1HRPrinter* hr_printer)
1267     : _hr_printer(hr_printer) { }
1268 };
1269 
1270 void G1CollectedHeap::print_hrs_post_compaction() {
1271   PostCompactionPrinterClosure cl(hr_printer());
1272   heap_region_iterate(&cl);
1273 }
1274 


1275 bool G1CollectedHeap::do_collection(bool explicit_gc,
1276                                     bool clear_all_soft_refs,
1277                                     size_t word_size) {
1278   assert_at_safepoint(true /* should_be_vm_thread */);
1279 
1280   if (GC_locker::check_active_before_gc()) {
1281     return false;
1282   }
1283 
1284   SvcGCMarker sgcm(SvcGCMarker::FULL);
1285   ResourceMark rm;
1286 
1287   print_heap_before_gc();
1288 
1289   size_t metadata_prev_used = MetaspaceAux::allocated_used_bytes();
1290 
1291   HRSPhaseSetter x(HRSPhaseFullGC);
1292   verify_region_sets_optional();
1293 
1294   const bool do_clear_all_soft_refs = clear_all_soft_refs ||


1459         ParRebuildRSTask rebuild_rs_task(this);
1460         assert(check_heap_region_claim_values(
1461                HeapRegion::InitialClaimValue), "sanity check");
1462         assert(UseDynamicNumberOfGCThreads ||
1463                workers()->active_workers() == workers()->total_workers(),
1464                "Unless dynamic should use total workers");
1465         // Use the most recent number of active workers
1466         assert(workers()->active_workers() > 0,
1467                "Active workers not properly set");
1468         set_par_threads(workers()->active_workers());
1469         workers()->run_task(&rebuild_rs_task);
1470         set_par_threads(0);
1471         assert(check_heap_region_claim_values(
1472                HeapRegion::RebuildRSClaimValue), "sanity check");
1473         reset_heap_region_claim_values();
1474       } else {
1475         RebuildRSOutOfRegionClosure rebuild_rs(this);
1476         heap_region_iterate(&rebuild_rs);
1477       }
1478 
1479       // Rebuild the strong code root lists for each region
1480       rebuild_strong_code_roots();
1481 
1482       if (true) { // FIXME
1483         MetaspaceGC::compute_new_size();
1484       }
1485 
1486 #ifdef TRACESPINNING
1487       ParallelTaskTerminator::print_termination_counts();
1488 #endif
1489 
1490       // Discard all rset updates
1491       JavaThread::dirty_card_queue_set().abandon_logs();
1492       assert(!G1DeferredRSUpdate
1493              || (G1DeferredRSUpdate &&
1494                 (dirty_card_queue_set().completed_buffers_num() == 0)), "Should not be any");
1495 
1496       _young_list->reset_sampled_info();
1497       // At this point there should be no regions in the
1498       // entire heap tagged as young.
1499       assert(check_young_list_empty(true /* check_heap */),
1500              "young list should be empty at this point");
1501 
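The call to rebuild_strong_code_roots() above is new with this patch; its body is not part of this hunk. A hedged sketch of what the rebuild plausibly involves, assuming a HeapRegionRemSet::add_strong_code_root(nmethod*) registration counterpart to the strong_code_roots_list_contains() query used by the verification code below:

    class RebuildStrongCodeRootsSketch : public CodeBlobClosure {
      G1CollectedHeap* _g1h;

      class RegisterNMethodOopClosure : public OopClosure {
        G1CollectedHeap* _g1h;
        nmethod*         _nm;

        template <class T> void do_oop_work(T* p) {
          T heap_oop = oopDesc::load_heap_oop(p);
          if (!oopDesc::is_null(heap_oop)) {
            oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
            HeapRegion* hr = _g1h->heap_region_containing(obj);
            // Assumed API: record _nm on the region's strong code
            // root list, ignoring duplicates.
            hr->rem_set()->add_strong_code_root(_nm);
          }
        }
      public:
        RegisterNMethodOopClosure(G1CollectedHeap* g1h, nmethod* nm) :
          _g1h(g1h), _nm(nm) { }
        void do_oop(oop* p)       { do_oop_work(p); }
        void do_oop(narrowOop* p) { do_oop_work(p); }
      };

    public:
      RebuildStrongCodeRootsSketch(G1CollectedHeap* g1h) : _g1h(g1h) { }

      void do_code_blob(CodeBlob* cb) {
        nmethod* nm = cb->as_nmethod_or_null();
        if (nm != NULL) {
          RegisterNMethodOopClosure oop_cl(_g1h, nm);
          nm->oops_do(&oop_cl);   // register nm with every region it points into
        }
      }
    };

    // Driven over the whole code cache (note codeCache.hpp is newly included above):
    //   RebuildStrongCodeRootsSketch cl(this);
    //   CodeCache::blobs_do(&cl);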


3018 bool G1CollectedHeap::is_marked(oop obj, VerifyOption vo) {
3019   switch (vo) {
3020   case VerifyOption_G1UsePrevMarking: return isMarkedPrev(obj);
3021   case VerifyOption_G1UseNextMarking: return isMarkedNext(obj);
3022   case VerifyOption_G1UseMarkWord:    return obj->is_gc_marked();
3023   default:                            ShouldNotReachHere();
3024   }
3025   return false; // keep some compilers happy
3026 }
3027 
3028 const char* G1CollectedHeap::top_at_mark_start_str(VerifyOption vo) {
3029   switch (vo) {
3030   case VerifyOption_G1UsePrevMarking: return "PTAMS";
3031   case VerifyOption_G1UseNextMarking: return "NTAMS";
3032   case VerifyOption_G1UseMarkWord:    return "NONE";
3033   default:                            ShouldNotReachHere();
3034   }
3035   return NULL; // keep some compilers happy
3036 }
3037 
3038 // TODO: VerifyRootsClosure extends OopsInGenClosure so that we can
3039 //       pass it as the perm_blk to SharedHeap::process_strong_roots.
3040 //       When process_strong_roots stops calling perm_blk->younger_refs_iterate
3041 //       we can change this closure to extend the simpler OopClosure.
3042 class VerifyRootsClosure: public OopsInGenClosure {
3043 private:
3044   G1CollectedHeap* _g1h;
3045   VerifyOption     _vo;
3046   bool             _failures;
3047 public:
3048   // _vo == UsePrevMarking -> use "prev" marking information,
3049   // _vo == UseNextMarking -> use "next" marking information,
3050   // _vo == UseMarkWord    -> use mark word from object header.
3051   VerifyRootsClosure(VerifyOption vo) :
3052     _g1h(G1CollectedHeap::heap()),
3053     _vo(vo),
3054     _failures(false) { }
3055 
3056   bool failures() { return _failures; }
3057 
3058   template <class T> void do_oop_nv(T* p) {
3059     T heap_oop = oopDesc::load_heap_oop(p);
3060     if (!oopDesc::is_null(heap_oop)) {
3061       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
3062       if (_g1h->is_obj_dead_cond(obj, _vo)) {
3063         gclog_or_tty->print_cr("Root location "PTR_FORMAT" "
3064                               "points to dead obj "PTR_FORMAT, p, (void*) obj);
3065         if (_vo == VerifyOption_G1UseMarkWord) {
3066           gclog_or_tty->print_cr("  Mark word: "PTR_FORMAT, (void*)(obj->mark()));
3067         }
3068         obj->print_on(gclog_or_tty);
3069         _failures = true;
3070       }
3071     }
3072   }
3073 
3074   void do_oop(oop* p)       { do_oop_nv(p); }
3075   void do_oop(narrowOop* p) { do_oop_nv(p); }
3076 };
3077 
3078 class G1VerifyCodeRootOopClosure: public OopsInGenClosure {
3079   G1CollectedHeap* _g1h;
3080   OopClosure* _root_cl;
3081   nmethod* _nm;
3082   VerifyOption _vo;
3083   bool _failures;
3084 
3085   template <class T> void do_oop_work(T* p) {
3086     // First verify that this root is live
3087     _root_cl->do_oop(p);
3088 
3089     if (!G1VerifyHeapRegionCodeRoots) {
3090       // We're not verifying the code roots attached to heap regions.
3091       return;
3092     }
3093 
3094     // Don't check the code roots during marking verification in a full GC
3095     if (_vo == VerifyOption_G1UseMarkWord) {
3096       return;
3097     }
3098 
3099     // Now verify that the current nmethod (which contains p) is
3100     // in the code root list of the heap region containing the
3101     // object referenced by p.
3102 
3103     T heap_oop = oopDesc::load_heap_oop(p);
3104     if (!oopDesc::is_null(heap_oop)) {
3105       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
3106
3107       // Now fetch the region containing the object
3108       HeapRegion* hr = _g1h->heap_region_containing(obj);
3109       HeapRegionRemSet* hrrs = hr->rem_set();
3110       // Verify that the strong code root list for this region
3111       // contains the nmethod
3112       if (!hrrs->strong_code_roots_list_contains(_nm)) {
3113         gclog_or_tty->print_cr("Code root location "PTR_FORMAT" "
3114                               "from nmethod "PTR_FORMAT" not in strong "
3115                               "code roots for region ["PTR_FORMAT","PTR_FORMAT")",
3116                               p, _nm, hr->bottom(), hr->end());
3117         _failures = true;
3118       }
3119     }
3120   }
3121 
3122 public:
3123   G1VerifyCodeRootOopClosure(G1CollectedHeap* g1h, OopClosure* root_cl, VerifyOption vo):
3124     _g1h(g1h), _root_cl(root_cl), _vo(vo), _nm(NULL), _failures(false) {}
3125 
3126   void do_oop(oop* p) { do_oop_work(p); }
3127   void do_oop(narrowOop* p) { do_oop_work(p); }
3128 
3129   void set_nmethod(nmethod* nm) { _nm = nm; }
3130   bool failures() { return _failures; }
3131 };
3132 
3133 class G1VerifyCodeRootBlobClosure: public CodeBlobClosure {
3134   G1VerifyCodeRootOopClosure* _oop_cl;
3135 
3136 public:
3137   G1VerifyCodeRootBlobClosure(G1VerifyCodeRootOopClosure* oop_cl):
3138     _oop_cl(oop_cl) {}
3139 
3140   void do_code_blob(CodeBlob* cb) {
3141     nmethod* nm = cb->as_nmethod_or_null();
3142     if (nm != NULL) {
3143       _oop_cl->set_nmethod(nm);
3144       nm->oops_do(_oop_cl);
3145     }
3146   }
3147 };
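Taken together, the two closures chain as follows in G1CollectedHeap::verify() further down: the blob closure tells the oop closure which nmethod it is currently walking, so every edge out of that nmethod can be checked against the strong code root list of the region it points into:

    VerifyRootsClosure rootsCl(vo);
    G1VerifyCodeRootOopClosure codeRootsCl(this, &rootsCl, vo);
    G1VerifyCodeRootBlobClosure blobsCl(&codeRootsCl);
    // blobsCl is then passed to process_strong_roots() as the code blob closure.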
3148 
3149 class YoungRefCounterClosure : public OopClosure {
3150   G1CollectedHeap* _g1h;
3151   int              _count;
3152  public:
3153   YoungRefCounterClosure(G1CollectedHeap* g1h) : _g1h(g1h), _count(0) {}
3154   void do_oop(oop* p)       { if (_g1h->is_in_young(*p)) { _count++; } }
3155   void do_oop(narrowOop* p) { ShouldNotReachHere(); }
3156 
3157   int count() { return _count; }
3158   void reset_count() { _count = 0; }
3159 };
3160 
3161 class VerifyKlassClosure: public KlassClosure {
3162   YoungRefCounterClosure _young_ref_counter_closure;
3163   OopClosure *_oop_closure;
3164  public:
3165   VerifyKlassClosure(G1CollectedHeap* g1h, OopClosure* cl) : _young_ref_counter_closure(g1h), _oop_closure(cl) {}
3166   void do_klass(Klass* k) {
3167     k->oops_do(_oop_closure);
3168 
3169     _young_ref_counter_closure.reset_count();
3170     k->oops_do(&_young_ref_counter_closure);
3171     if (_young_ref_counter_closure.count() > 0) {
3172       guarantee(k->has_modified_oops(), err_msg("Klass %p has young refs but is not dirty.", k));
3173     }
3174   }
3175 };
3176 
3177 class VerifyLivenessOopClosure: public OopClosure {
3178   G1CollectedHeap* _g1h;
3179   VerifyOption _vo;
3180 public:
3181   VerifyLivenessOopClosure(G1CollectedHeap* g1h, VerifyOption vo):
3182     _g1h(g1h), _vo(vo)
3183   { }
3184   void do_oop(narrowOop *p) { do_oop_work(p); }
3185   void do_oop(      oop *p) { do_oop_work(p); }
3186 
3187   template <class T> void do_oop_work(T *p) {
3188     oop obj = oopDesc::load_decode_heap_oop(p);
3189     guarantee(obj == NULL || !_g1h->is_obj_dead_cond(obj, _vo),
3190               "Dead object referenced by a not dead object");
3191   }
3192 };
3193 
3194 class VerifyObjsInRegionClosure: public ObjectClosure {
3195 private:
3196   G1CollectedHeap* _g1h;


3290           if (r->max_live_bytes() < not_dead_yet_cl.live_bytes()) {
3291             gclog_or_tty->print_cr("["PTR_FORMAT","PTR_FORMAT"] "
3292                                    "max_live_bytes "SIZE_FORMAT" "
3293                                    "< calculated "SIZE_FORMAT,
3294                                    r->bottom(), r->end(),
3295                                    r->max_live_bytes(),
3296                                  not_dead_yet_cl.live_bytes());
3297             _failures = true;
3298           }
3299         } else {
3300           // When vo == UseNextMarking we cannot currently do a sanity
3301           // check on the live bytes as the calculation has not been
3302           // finalized yet.
3303         }
3304       }
3305     }
3306     return false; // stop the region iteration if we hit a failure
3307   }
3308 };
3309 
3310 // This is the task used for parallel verification of the heap regions


3311 
3312 class G1ParVerifyTask: public AbstractGangTask {
3313 private:
3314   G1CollectedHeap* _g1h;
3315   VerifyOption     _vo;
3316   bool             _failures;
3317 
3318 public:
3319   // _vo == UsePrevMarking -> use "prev" marking information,
3320   // _vo == UseNextMarking -> use "next" marking information,
3321   // _vo == UseMarkWord    -> use mark word from object header.
3322   G1ParVerifyTask(G1CollectedHeap* g1h, VerifyOption vo) :
3323     AbstractGangTask("Parallel verify task"),
3324     _g1h(g1h),
3325     _vo(vo),
3326     _failures(false) { }
3327 
3328   bool failures() {
3329     return _failures;
3330   }
3331 
3332   void work(uint worker_id) {
3333     HandleMark hm;
3334     VerifyRegionClosure blk(true, _vo);
3335     _g1h->heap_region_par_iterate_chunked(&blk, worker_id,
3336                                           _g1h->workers()->active_workers(),
3337                                           HeapRegion::ParVerifyClaimValue);
3338     if (blk.failures()) {
3339       _failures = true;
3340     }
3341   }
3342 };
3343 
3344 void G1CollectedHeap::verify(bool silent, VerifyOption vo) {





3345   if (SafepointSynchronize::is_at_safepoint()) {



3346     assert(Thread::current()->is_VM_thread(),
3347            "Expected to be executed serially by the VM thread at this point");
3348 
3349     if (!silent) { gclog_or_tty->print("Roots "); }
3350     VerifyRootsClosure rootsCl(vo);
3351     G1VerifyCodeRootOopClosure codeRootsCl(this, &rootsCl, vo);
3352     G1VerifyCodeRootBlobClosure blobsCl(&codeRootsCl);
3353     VerifyKlassClosure klassCl(this, &rootsCl);
3354 
3355     // We apply the relevant closures to all the oops in the
3356     // system dictionary, the string table and the code cache.
3357     const int so = SO_AllClasses | SO_Strings | SO_CodeCache;
3358 
3359     // Need cleared claim bits for the strong roots processing
3360     ClassLoaderDataGraph::clear_claimed_marks();
3361 
3362     process_strong_roots(true,      // activate StrongRootsScope
3363                          false,     // we set "is scavenging" to false,
3364                                     // so we don't reset the dirty cards.
3365                          ScanningOption(so),  // roots scanning options
3366                          &rootsCl,
3367                          &blobsCl,
3368                          &klassCl
3369                          );
3370 
3371     bool failures = rootsCl.failures() || codeRootsCl.failures();
3372 
3373     if (vo != VerifyOption_G1UseMarkWord) {
3374       // If we're verifying during a full GC then the region sets
3375       // will have been torn down at the start of the GC. Therefore
3376       // verifying the region sets will fail. So we only verify
3377       // the region sets when not in a full GC.
3378       if (!silent) { gclog_or_tty->print("HeapRegionSets "); }
3379       verify_region_sets();
3380     }
3381 
3382     if (!silent) { gclog_or_tty->print("HeapRegions "); }
3383     if (GCParallelVerificationEnabled && ParallelGCThreads > 1) {
3384       assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue),
3385              "sanity check");
3386 
3387       G1ParVerifyTask task(this, vo);
3388       assert(UseDynamicNumberOfGCThreads ||
3389         workers()->active_workers() == workers()->total_workers(),
3390         "If not dynamic should be using all the workers");
3391       int n_workers = workers()->active_workers();


3420       // It helps to have the per-region information in the output to
3421       // help us track down what went wrong. This is why we call
3422       // print_extended_on() instead of print_on().
3423       print_extended_on(gclog_or_tty);
3424       gclog_or_tty->print_cr("");
3425 #ifndef PRODUCT
3426       if (VerifyDuringGC && G1VerifyDuringGCPrintReachable) {
3427         concurrent_mark()->print_reachable("at-verification-failure",
3428                                            vo, false /* all */);
3429       }
3430 #endif
3431       gclog_or_tty->flush();
3432     }
3433     guarantee(!failures, "there should not have been any failures");
3434   } else {
3435     if (!silent)
3436       gclog_or_tty->print("(SKIPPING roots, heapRegionSets, heapRegions, remset) ");
3437   }
3438 }
3439 
3440 void G1CollectedHeap::verify(bool silent) {
3441   verify(silent, VerifyOption_G1UsePrevMarking);
3442 }
3443 
3444 double G1CollectedHeap::verify(bool guard, const char* msg) {
3445   double verify_time_ms = 0.0;
3446 
3447   if (guard && total_collections() >= VerifyGCStartAt) {
3448     double verify_start = os::elapsedTime();
3449     HandleMark hm;  // Discard invalid handles created during verification
3450     prepare_for_verify();
3451     Universe::verify(VerifyOption_G1UsePrevMarking, msg);
3452     verify_time_ms = (os::elapsedTime() - verify_start) * 1000;
3453   }
3454 
3455   return verify_time_ms;
3456 }
3457 
3458 void G1CollectedHeap::verify_before_gc() {
3459   double verify_time_ms = verify(VerifyBeforeGC, " VerifyBeforeGC:");
3460   g1_policy()->phase_times()->record_verify_before_time_ms(verify_time_ms);
3461 }
3462 
3463 void G1CollectedHeap::verify_after_gc() {
3464   double verify_time_ms = verify(VerifyAfterGC, " VerifyAfterGC:");
3465   g1_policy()->phase_times()->record_verify_after_time_ms(verify_time_ms);
3466 }
3467 
3468 class PrintRegionClosure: public HeapRegionClosure {
3469   outputStream* _st;
3470 public:
3471   PrintRegionClosure(outputStream* st) : _st(st) {}
3472   bool doHeapRegion(HeapRegion* r) {
3473     r->print_on(_st);
3474     return false;
3475   }
3476 };
3477 
3478 void G1CollectedHeap::print_on(outputStream* st) const {
3479   st->print(" %-20s", "garbage-first heap");
3480   st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K",
3481             capacity()/K, used_unlocked()/K);
3482   st->print(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")",
3483             _g1_storage.low_boundary(),
3484             _g1_storage.high(),
3485             _g1_storage.high_boundary());
3486   st->cr();
3487   st->print("  region size " SIZE_FORMAT "K, ", HeapRegion::GrainBytes / K);


3860 
3861     int active_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
3862                                 workers()->active_workers() : 1);
3863     double pause_start_sec = os::elapsedTime();
3864     g1_policy()->phase_times()->note_gc_start(active_workers);
3865     log_gc_header();
3866 
3867     TraceCollectorStats tcs(g1mm()->incremental_collection_counters());
3868     TraceMemoryManagerStats tms(false /* fullGC */, gc_cause());
3869 
3870     // If the secondary_free_list is not empty, append it to the
3871     // free_list. No need to wait for the cleanup operation to finish;
3872     // the region allocation code will check the secondary_free_list
3873     // and wait if necessary. If the G1StressConcRegionFreeing flag is
3874     // set, skip this step so that the region allocation code has to
3875     // get entries from the secondary_free_list.
3876     if (!G1StressConcRegionFreeing) {
3877       append_secondary_free_list_if_not_empty_with_lock();
3878     }
3879 
3880     assert(check_young_list_well_formed(), "young list should be well formed");
3881     assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue),
3882            "sanity check");
3883 
3884     // Don't dynamically change the number of GC threads this early.  A value of
3885     // 0 is used to indicate serial work.  When parallel work is done,
3886     // it will be set.
3887 
3888     { // Call to jvmpi::post_class_unload_events must occur outside of active GC
3889       IsGCActiveMark x;
3890 
3891       gc_prologue(false);
3892       increment_total_collections(false /* full gc */);
3893       increment_gc_time_stamp();
3894 
3895       verify_before_gc();
3896 
3897       COMPILER2_PRESENT(DerivedPointerTable::clear());
3898 
3899       // Please see comment in g1CollectedHeap.hpp and
3900       // G1CollectedHeap::ref_processing_init() to see how
3901       // reference processing currently works in G1.
3902 


4966       G1ParScanMetadataClosure       only_scan_metadata_cl(_g1h, &pss, rp);
4967 
4968       G1ParScanAndMarkExtRootClosure scan_mark_root_cl(_g1h, &pss, rp);
4969       G1ParScanAndMarkMetadataClosure scan_mark_metadata_cl(_g1h, &pss, rp);
4970 
4971       bool only_young                 = _g1h->g1_policy()->gcs_are_young();
4972       G1KlassScanClosure              scan_mark_klasses_cl_s(&scan_mark_metadata_cl, false);
4973       G1KlassScanClosure              only_scan_klasses_cl_s(&only_scan_metadata_cl, only_young);
4974 
4975       OopClosure*                    scan_root_cl = &only_scan_root_cl;
4976       G1KlassScanClosure*            scan_klasses_cl = &only_scan_klasses_cl_s;
4977 
4978       if (_g1h->g1_policy()->during_initial_mark_pause()) {
4979         // We also need to mark copied objects.
4980         scan_root_cl = &scan_mark_root_cl;
4981         scan_klasses_cl = &scan_mark_klasses_cl_s;
4982       }
4983 
4984       G1ParPushHeapRSClosure          push_heap_rs_cl(_g1h, &pss);
4985 
4986       // Don't scan the scavengable methods in the code cache as part
4987       // of strong root scanning. The code roots that point into a
4988       // region in the collection set are scanned when we scan the
4989       // region's RSet.
4990       int so = SharedHeap::SO_AllClasses | SharedHeap::SO_Strings;
4991 
4992       pss.start_strong_roots();
4993       _g1h->g1_process_strong_roots(/* is scavenging */ true,
4994                                     SharedHeap::ScanningOption(so),
4995                                     scan_root_cl,
4996                                     &push_heap_rs_cl,
4997                                     scan_klasses_cl,
4998                                     worker_id);
4999       pss.end_strong_roots();
5000 
5001       {
5002         double start = os::elapsedTime();
5003         G1ParEvacuateFollowersClosure evac(_g1h, &pss, _queues, &_terminator);
5004         evac.do_void();
5005         double elapsed_ms = (os::elapsedTime()-start)*1000.0;
5006         double term_ms = pss.term_time()*1000.0;
5007         _g1h->g1_policy()->phase_times()->add_obj_copy_time(worker_id, elapsed_ms-term_ms);
5008         _g1h->g1_policy()->phase_times()->record_termination(worker_id, term_ms, pss.term_attempts());
5009       }
5010       _g1h->g1_policy()->record_thread_age_table(pss.age_table());


5012 
5013       if (ParallelGCVerbose) {
5014         MutexLocker x(stats_lock());
5015         pss.print_termination_stats(worker_id);
5016       }
5017 
5018       assert(pss.refs()->is_empty(), "should be empty");
5019 
5020       // Close the inner scope so that the ResourceMark and HandleMark
5021       // destructors are executed here and are included as part of the
5022       // "GC Worker Time".
5023     }
5024 
5025     double end_time_ms = os::elapsedTime() * 1000.0;
5026     _g1h->g1_policy()->phase_times()->record_gc_worker_end_time(worker_id, end_time_ms);
5027   }
5028 };
5029 
5030 // *** Common G1 Evacuation Stuff
5031 
5032 // This method is run in a GC worker.
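     // It applies scan_non_heap_roots to the external strong roots,
     // scan_klasses to the klasses, and scan_rs to the remembered sets
     // of the collection set regions; the non-heap oops are buffered so
     // that the time spent applying the closure to them can be
     // attributed to object copy.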
5033 
5034 void
5035 G1CollectedHeap::
5036 g1_process_strong_roots(bool is_scavenging,
5037                         ScanningOption so,
5038                         OopClosure* scan_non_heap_roots,
5039                         OopsInHeapRegionClosure* scan_rs,
5040                         G1KlassScanClosure* scan_klasses,
5041                         int worker_i) {
5042 
5043   // First scan the strong roots
5044   double ext_roots_start = os::elapsedTime();
5045   double closure_app_time_sec = 0.0;
5046 
5047   BufferingOopClosure buf_scan_non_heap_roots(scan_non_heap_roots);
5048 
5049   assert((so & SO_CodeCache) || scan_rs != NULL, "must scan code roots somehow");
5050   // Walk the code cache/strong code roots w/o buffering, because StarTask
5051   // cannot handle unaligned oop locations.
5052   CodeBlobToOopClosure eager_scan_code_roots(scan_non_heap_roots, true /* do_marking */);
5053 
5054   process_strong_roots(false, // no scoping; this is parallel code
5055                        is_scavenging, so,
5056                        &buf_scan_non_heap_roots,
5057                        &eager_scan_code_roots,
5058                        scan_klasses
5059                        );
5060 
5061   // Now the CM ref_processor roots.
5062   if (!_process_strong_tasks->is_task_claimed(G1H_PS_refProcessor_oops_do)) {
5063     // We need to treat the discovered reference lists of the
5064     // concurrent mark ref processor as roots and keep entries
5065     // (which are added by the marking threads) on them live
5066     // until they can be processed at the end of marking.
5067     ref_processor_cm()->weak_oops_do(&buf_scan_non_heap_roots);
5068   }
5069 
5070   // Finish up any enqueued closure apps (attributed as object copy time).
5071   buf_scan_non_heap_roots.done();
5072 


5077   double ext_root_time_ms =
5078     ((os::elapsedTime() - ext_roots_start) - obj_copy_time_sec) * 1000.0;
5079 
5080   g1_policy()->phase_times()->record_ext_root_scan_time(worker_i, ext_root_time_ms);
5081 
5082   // During conc marking we have to filter the per-thread SATB buffers
5083   // to make sure we remove any oops pointing into the CSet (which will show up
5084   // as implicitly live).
5085   double satb_filtering_ms = 0.0;
5086   if (!_process_strong_tasks->is_task_claimed(G1H_PS_filter_satb_buffers)) {
5087     if (mark_in_progress()) {
5088       double satb_filter_start = os::elapsedTime();
5089 
5090       JavaThread::satb_mark_queue_set().filter_thread_buffers();
5091 
5092       satb_filtering_ms = (os::elapsedTime() - satb_filter_start) * 1000.0;
5093     }
5094   }
5095   g1_policy()->phase_times()->record_satb_filtering_time(worker_i, satb_filtering_ms);
5096 
5097   // If this is an initial mark pause, and we're not scanning
5098   // the entire code cache, we need to mark the oops in the
5099   // strong code root lists for the regions that are not in
5100   // the collection set.
5101   // Note all threads participate in this set of root tasks.
5102   double mark_strong_code_roots_ms = 0.0;
5103   if (g1_policy()->during_initial_mark_pause() && !(so & SO_CodeCache)) {
5104     double mark_strong_roots_start = os::elapsedTime();
5105     mark_strong_code_roots(worker_i);
5106     mark_strong_code_roots_ms = (os::elapsedTime() - mark_strong_roots_start) * 1000.0;
5107   }
5108   g1_policy()->phase_times()->record_strong_code_root_mark_time(worker_i, mark_strong_code_roots_ms);
5109 
5110   // Now scan the complement of the collection set.
5111   if (scan_rs != NULL) {
5112     g1_rem_set()->oops_into_collection_set_do(scan_rs, &eager_scan_code_roots, worker_i);
5113   }
5114   _process_strong_tasks->all_tasks_completed();
5115 }
5116 
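     // Applies the given closure to the weak roots of the system; code
     // blobs discovered from those roots are walked with a non-marking
     // CodeBlobToOopClosure.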
5117 void
5118 G1CollectedHeap::g1_process_weak_roots(OopClosure* root_closure) {
5119   CodeBlobToOopClosure roots_in_blobs(root_closure, /*do_marking=*/ false);
5120   SharedHeap::process_weak_roots(root_closure, &roots_in_blobs);
5121 }
5122 
5123 // Weak Reference Processing support
5124 
5125 // An always "is_alive" closure that is used to preserve referents.
5126 // If the object is non-null then it's alive.  Used in the preservation
5127 // of referent objects that are pointed to by reference objects
5128 // discovered by the CM ref processor.
5129 class G1AlwaysAliveClosure: public BoolObjectClosure {
5130   G1CollectedHeap* _g1;
5131 public:
5132   G1AlwaysAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}


5703     // reported parallel time.
5704   }
5705 
5706   double par_time_ms = (end_par_time_sec - start_par_time_sec) * 1000.0;
5707   g1_policy()->phase_times()->record_par_time(par_time_ms);
5708 
5709   double code_root_fixup_time_ms =
5710         (os::elapsedTime() - end_par_time_sec) * 1000.0;
5711   g1_policy()->phase_times()->record_code_root_fixup_time(code_root_fixup_time_ms);
5712 
5713   set_par_threads(0);
5714 
5715   // Process any discovered reference objects - we have
5716   // to do this _before_ we retire the GC alloc regions
5717   // as we may have to copy some 'reachable' referent
5718   // objects (and their reachable sub-graphs) that were
5719   // not copied during the pause.
5720   process_discovered_references(n_workers);
5721 
5722   // Weak root processing.



5723   {
5724     G1STWIsAliveClosure is_alive(this);
5725     G1KeepAliveClosure keep_alive(this);
5726     JNIHandles::weak_oops_do(&is_alive, &keep_alive);
5727   }
5728 
5729   release_gc_alloc_regions(n_workers);
5730   g1_rem_set()->cleanup_after_oops_into_collection_set_do();
5731 
5732   // Reset and re-enable the hot card cache.
5733   // Note the counts for the cards in the regions in the
5734   // collection set are reset when the collection set is freed.
5735   hot_card_cache->reset_hot_cache();
5736   hot_card_cache->set_use_cache(true);
5737 
5738   // Migrate the strong code roots attached to each region in
5739   // the collection set. Ideally we would like to do this
5740   // after we have finished the scanning/evacuation of the
5741   // strong code roots for a particular heap region.
5742   migrate_strong_code_roots();
5743 
5744   if (g1_policy()->during_initial_mark_pause()) {
5745     // Reset the claim values set during marking the strong code roots
5746     reset_heap_region_claim_values();
5747   }
5748 
5749   finalize_for_evac_failure();
5750 
5751   if (evacuation_failed()) {
5752     remove_self_forwarding_pointers();
5753 
5754     // Reset the G1EvacuationFailureALot counters and flags
5755     // Note: the values are reset only when an actual
5756     // evacuation failure occurs.
5757     NOT_PRODUCT(reset_evacuation_should_fail();)
5758   }
5759 
5760   // Enqueue any references remaining on the STW
5761   // reference processor's discovered lists. We need to do
5762   // this after the card table is cleaned (and verified) as
5763   // the act of enqueuing entries on to the pending list
5764   // will log these updates (and dirty their associated
5765   // cards). We need these updates logged to update any
5766   // RSets.
5767   enqueue_discovered_references(n_workers);
5768 


6523   }
6524 
6525   // Make sure we append the secondary_free_list on the free_list so
6526   // that all free regions we will come across can be safely
6527   // attributed to the free_list.
6528   append_secondary_free_list_if_not_empty_with_lock();
6529 
6530   // Finally, make sure that the region accounting in the lists is
6531   // consistent with what we see in the heap.
6532   _old_set.verify_start();
6533   _humongous_set.verify_start();
6534   _free_list.verify_start();
6535 
6536   VerifyRegionListsClosure cl(&_old_set, &_humongous_set, &_free_list);
6537   heap_region_iterate(&cl);
6538 
6539   _old_set.verify_end();
6540   _humongous_set.verify_end();
6541   _free_list.verify_end();
6542 }
6543 
6544 // Optimized nmethod scanning
6545 
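     // Iterates over the oops of an nmethod and adds the nmethod to the
     // strong code root list of every region that one of those oops
     // points into.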
6546 class RegisterNMethodOopClosure: public OopClosure {
6547   G1CollectedHeap* _g1h;
6548   nmethod* _nm;
6549 
6550   template <class T> void do_oop_work(T* p) {
6551     T heap_oop = oopDesc::load_heap_oop(p);
6552     if (!oopDesc::is_null(heap_oop)) {
6553       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
6554       HeapRegion* hr = _g1h->heap_region_containing(obj);
6555       assert(!hr->isHumongous(), "code root in humongous region?");
6556 
6557       // HeapRegion::add_strong_code_root() avoids adding duplicate
6558   // entries but having duplicates is OK since we "mark" nmethods
6559       // as visited when we scan the strong code root lists during the GC.
6560       hr->add_strong_code_root(_nm);
6561       assert(hr->rem_set()->strong_code_roots_list_contains(_nm), "add failed?");
6562     }
6563   }
6564 
6565 public:
6566   RegisterNMethodOopClosure(G1CollectedHeap* g1h, nmethod* nm) :
6567     _g1h(g1h), _nm(nm) {}
6568 
6569   void do_oop(oop* p)       { do_oop_work(p); }
6570   void do_oop(narrowOop* p) { do_oop_work(p); }
6571 };
6572 
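     // Counterpart of RegisterNMethodOopClosure: removes the nmethod
     // from the strong code root list of every region that one of its
     // oops points into.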
6573 class UnregisterNMethodOopClosure: public OopClosure {
6574   G1CollectedHeap* _g1h;
6575   nmethod* _nm;
6576 
6577   template <class T> void do_oop_work(T* p) {
6578     T heap_oop = oopDesc::load_heap_oop(p);
6579     if (!oopDesc::is_null(heap_oop)) {
6580       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
6581       HeapRegion* hr = _g1h->heap_region_containing(obj);
6582       assert(!hr->isHumongous(), "code root in humongous region?");
6583       hr->remove_strong_code_root(_nm);
6584       assert(!hr->rem_set()->strong_code_roots_list_contains(_nm), "remove failed?");
6585     }
6586   }
6587 
6588 public:
6589   UnregisterNMethodOopClosure(G1CollectedHeap* g1h, nmethod* nm) :
6590     _g1h(g1h), _nm(nm) {}
6591 
6592   void do_oop(oop* p)       { do_oop_work(p); }
6593   void do_oop(narrowOop* p) { do_oop_work(p); }
6594 };
6595 
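     // Entry points used by the rest of the VM to keep the per-region
     // strong code root lists up to date, e.g. when an nmethod with
     // scavengable oops is created or flushed.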
6596 void G1CollectedHeap::register_nmethod(nmethod* nm) {
6597   guarantee(nm != NULL, "sanity");
6598   RegisterNMethodOopClosure reg_cl(this, nm);
6599   nm->oops_do(&reg_cl);
6600 }
6601 
6602 void G1CollectedHeap::unregister_nmethod(nmethod* nm) {
6603   guarantee(nm != NULL, "sanity");
6604   UnregisterNMethodOopClosure reg_cl(this, nm);
6605   nm->oops_do(&reg_cl);
6606 }
6607 
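     // After evacuation, the code roots attached to a collection set
     // region refer to objects that have been copied out of it;
     // migrate_strong_code_roots() moves those entries to the lists of
     // the regions the objects now reside in.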
6608 class MigrateCodeRootsHeapRegionClosure: public HeapRegionClosure {
6609 public:
6610   bool doHeapRegion(HeapRegion* hr) {
6611     assert(!hr->isHumongous(), "humongous region in collection set?");
6612     hr->migrate_strong_code_roots();
6613     return false;
6614   }
6615 };
6616 
6617 void G1CollectedHeap::migrate_strong_code_roots() {
6618   MigrateCodeRootsHeapRegionClosure cl;
6619   double migrate_start = os::elapsedTime();
6620   collection_set_iterate(&cl);
6621   double migration_time_ms = (os::elapsedTime() - migrate_start) * 1000.0;
6622   g1_policy()->phase_times()->record_strong_code_root_migration_time(migration_time_ms);
6623 }
6624 
6625 // Mark all the code roots that point into regions *not* in the
6626 // collection set.
6627 //
6628 // Note we do not want to use a "marking" CodeBlobToOopClosure while
6629 // walking the code root lists of regions not in the collection
6630 // set. Suppose we have an nmethod (M) that points to objects in two
6631 // separate regions - one in the collection set (R1) and one not (R2).
6632 // Using a "marking" CodeBlobToOopClosure here would result in "marking"
6633 // nmethod M when walking the code roots for R1. When we come to scan
6634 // the code roots for R2, we would see that M is already marked and it
6635 // would be skipped and the objects in R2 that are referenced from M
6636 // would not be evacuated.
6637 
6638 class MarkStrongCodeRootCodeBlobClosure: public CodeBlobClosure {
6639 
6640   class MarkStrongCodeRootOopClosure: public OopClosure {
6641     ConcurrentMark* _cm;
6642     HeapRegion* _hr;
6643     uint _worker_id;
6644 
6645     template <class T> void do_oop_work(T* p) {
6646       T heap_oop = oopDesc::load_heap_oop(p);
6647       if (!oopDesc::is_null(heap_oop)) {
6648         oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
6649         // Only mark objects in the region (which is assumed
6650         // not to be in the collection set).
6651         if (_hr->is_in(obj)) {
6652           _cm->grayRoot(obj, (size_t) obj->size(), _worker_id);
6653         }
6654       }
6655     }
6656 
6657   public:
6658     MarkStrongCodeRootOopClosure(ConcurrentMark* cm, HeapRegion* hr, uint worker_id) :
6659       _cm(cm), _hr(hr), _worker_id(worker_id) {
6660       assert(!_hr->in_collection_set(), "sanity");
6661     }
6662 
6663     void do_oop(narrowOop* p) { do_oop_work(p); }
6664     void do_oop(oop* p)       { do_oop_work(p); }
6665   };
6666 
6667   MarkStrongCodeRootOopClosure _oop_cl;
6668 
6669 public:
6670   MarkStrongCodeRootCodeBlobClosure(ConcurrentMark* cm, HeapRegion* hr, uint worker_id):
6671     _oop_cl(cm, hr, worker_id) {}
6672 
6673   void do_code_blob(CodeBlob* cb) {
6674     nmethod* nm = (cb == NULL) ? NULL : cb->as_nmethod_or_null();
6675     if (nm != NULL) {
6676       nm->oops_do(&_oop_cl);
6677     }
6678   }
6679 };
6680 
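     // Per-region closure that applies the code blob closure above to
     // the strong code roots of every region that is neither humongous
     // nor in the collection set.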
6681 class MarkStrongCodeRootsHRClosure: public HeapRegionClosure {
6682   G1CollectedHeap* _g1h;
6683   uint _worker_id;
6684 
6685 public:
6686   MarkStrongCodeRootsHRClosure(G1CollectedHeap* g1h, uint worker_id) :
6687     _g1h(g1h), _worker_id(worker_id) {}
6688 
6689   bool doHeapRegion(HeapRegion* hr) {
6690     HeapRegionRemSet* hrrs = hr->rem_set();
6691     if (hr->isHumongous()) {
6692       // Code roots should never be attached to a humongous region
6693       assert(hrrs->strong_code_roots_list_length() == 0, "sanity");
6694       return false;
6695     }
6696 
6697     if (hr->in_collection_set()) {
6698       // Don't mark code roots into regions in the collection set here.
6699       // They will be marked when we scan them.
6700       return false;
6701     }
6702 
6703     MarkStrongCodeRootCodeBlobClosure cb_cl(_g1h->concurrent_mark(), hr, _worker_id);
6704     hr->strong_code_roots_do(&cb_cl);
6705     return false;
6706   }
6707 };
6708 
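     // Invoked by each worker thread during an initial mark pause;
     // heap_region_par_iterate_chunked() uses the claim value to ensure
     // that each region is processed by exactly one worker.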
6709 void G1CollectedHeap::mark_strong_code_roots(uint worker_id) {
6710   MarkStrongCodeRootsHRClosure cl(this, worker_id);
6711   heap_region_par_iterate_chunked(&cl,
6712                                   worker_id,
6713                                   workers()->active_workers(),
6714                                   HeapRegion::ParMarkRootClaimValue);
6715 }
6716 
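     // Re-registers every nmethod in the code cache that contains
     // scavengable oops, rebuilding the strong code root lists from
     // scratch, e.g. after a full GC has invalidated them.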
6717 class RebuildStrongCodeRootClosure: public CodeBlobClosure {
6718   G1CollectedHeap* _g1h;
6719 
6720 public:
6721   RebuildStrongCodeRootClosure(G1CollectedHeap* g1h) :
6722     _g1h(g1h) {}
6723 
6724   void do_code_blob(CodeBlob* cb) {
6725     nmethod* nm = (cb != NULL) ? cb->as_nmethod_or_null() : NULL;
6726     if (nm == NULL) {
6727       return;
6728     }
6729 
6730     if (ScavengeRootsInCode && nm->detect_scavenge_root_oops()) {
6731       _g1h->register_nmethod(nm);
6732     }
6733   }
6734 };
6735 
6736 void G1CollectedHeap::rebuild_strong_code_roots() {
6737   RebuildStrongCodeRootClosure blob_cl(this);
6738   CodeCache::blobs_do(&blob_cl);
6739 }