
src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp

rev 7209 : 6979279


1421 
1422       // Resize the heap if necessary.
1423       resize_if_necessary_after_full_collection(explicit_gc ? 0 : word_size);
1424 
1425       if (_hr_printer.is_active()) {
1426         // We should do this after we potentially resize the heap so
1427         // that all the COMMIT / UNCOMMIT events are generated before
1428         // the end GC event.
1429 
1430         print_hrm_post_compaction();
1431         _hr_printer.end_gc(true /* full */, (size_t) total_collections());
1432       }
1433 
1434       G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache();
1435       if (hot_card_cache->use_cache()) {
1436         hot_card_cache->reset_card_counts();
1437         hot_card_cache->reset_hot_cache();
1438       }
1439 
1440       // Rebuild remembered sets of all regions.
1441       if (G1CollectedHeap::use_parallel_gc_threads()) {
1442         uint n_workers =
1443           AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
1444                                                   workers()->active_workers(),
1445                                                   Threads::number_of_non_daemon_threads());
1446         assert(UseDynamicNumberOfGCThreads ||
1447                n_workers == workers()->total_workers(),
1448                "If not dynamic should be using all the workers");
1449         workers()->set_active_workers(n_workers);
1450         // Set parallel threads in the heap (_n_par_threads) only
1451         // before a parallel phase and always reset it to 0 after
1452         // the phase so that the number of parallel threads does
1453         // not get carried forward to a serial phase where there
1454         // may be code that is "possibly_parallel".
1455         set_par_threads(n_workers);
1456 
1457         ParRebuildRSTask rebuild_rs_task(this);
1458         assert(UseDynamicNumberOfGCThreads ||
1459                workers()->active_workers() == workers()->total_workers(),
1460                "Unless dynamic should use total workers");
1461         // Use the most recent number of active workers
1462         assert(workers()->active_workers() > 0,
1463                "Active workers not properly set");
1464         set_par_threads(workers()->active_workers());
1465         workers()->run_task(&rebuild_rs_task);
1466         set_par_threads(0);
1467       } else {
1468         RebuildRSOutOfRegionClosure rebuild_rs(this);
1469         heap_region_iterate(&rebuild_rs);
1470       }
1471 
1472       // Rebuild the strong code root lists for each region
1473       rebuild_strong_code_roots();
1474 
1475       if (true) { // FIXME
1476         MetaspaceGC::compute_new_size();
1477       }
1478 
1479 #ifdef TRACESPINNING
1480       ParallelTaskTerminator::print_termination_counts();
1481 #endif
1482 
1483       // Discard all rset updates
1484       JavaThread::dirty_card_queue_set().abandon_logs();
1485       assert(dirty_card_queue_set().completed_buffers_num() == 0, "DCQS should be empty");
1486 
1487       _young_list->reset_sampled_info();
1488       // At this point there should be no regions in the
1489       // entire heap tagged as young.
1490       assert(check_young_list_empty(true /* check_heap */),
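
The comment in the hunk above explains why _n_par_threads is set only immediately before a parallel phase and always reset to 0 afterwards: code that is "possibly_parallel" must not see a stale worker count once a serial phase resumes. A minimal standalone sketch of that set/run/reset discipline, in plain C++ outside HotSpot (FakeHeap and ParallelPhaseScope are illustrative names, not HotSpot types):

#include <cassert>
#include <cstdio>

// Illustrative stand-in for the heap's _n_par_threads bookkeeping.
struct FakeHeap {
  unsigned n_par_threads = 0;                  // 0 means "serial phase"
  void set_par_threads(unsigned n) { n_par_threads = n; }
};

// Scope guard mirroring the set_par_threads(n) ... set_par_threads(0) pattern:
// the worker count is published on entry and unconditionally cleared on exit.
class ParallelPhaseScope {
  FakeHeap& _heap;
public:
  ParallelPhaseScope(FakeHeap& heap, unsigned workers) : _heap(heap) {
    _heap.set_par_threads(workers);
  }
  ~ParallelPhaseScope() { _heap.set_par_threads(0); }
};

int main() {
  FakeHeap heap;
  {
    ParallelPhaseScope phase(heap, 4);         // parallel phase with 4 workers
    assert(heap.n_par_threads == 4);
    // ... workers()->run_task(&rebuild_rs_task) would execute here ...
  }
  // Back in a serial phase: "possibly_parallel" code must see 0 workers.
  assert(heap.n_par_threads == 0);
  std::printf("n_par_threads after the parallel phase: %u\n", heap.n_par_threads);
  return 0;
}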


2661     // during the current pause - so it's valid.
2662     // Note: the cached starting heap region may be NULL
2663     // (when the collection set is empty).
2664     result = _worker_cset_start_region[worker_i];
2665     assert(result == NULL || result->in_collection_set(), "sanity");
2666     return result;
2667   }
2668 
2669   // The cached entry was not valid so let's calculate
2670   // a suitable starting heap region for this worker.
2671 
2672   // We want the parallel threads to start their collection
2673   // set iteration at different collection set regions to
2674   // avoid contention.
2675   // If we have:
2676   //          n collection set regions
2677   //          p threads
2678   // Then thread t will start at region floor ((t * n) / p)
2679 
2680   result = g1_policy()->collection_set();
2681   if (G1CollectedHeap::use_parallel_gc_threads()) {
2682     uint cs_size = g1_policy()->cset_region_length();
2683     uint active_workers = workers()->active_workers();
2684     assert(UseDynamicNumberOfGCThreads ||
2685              active_workers == workers()->total_workers(),
2686              "Unless dynamic should use total workers");
2687 
2688     uint end_ind   = (cs_size * worker_i) / active_workers;
2689     uint start_ind = 0;
2690 
2691     if (worker_i > 0 &&
2692         _worker_cset_start_region_time_stamp[worker_i - 1] == gc_time_stamp) {
2693       // Previous worker's starting region is valid
2694       // so let's iterate from there
2695       start_ind = (cs_size * (worker_i - 1)) / active_workers;
2696       result = _worker_cset_start_region[worker_i - 1];
2697     }
2698 
2699     for (uint i = start_ind; i < end_ind; i++) {
2700       result = result->next_in_collection_set();
2701     }
2702   }
2703 
2704   // Note: the calculated starting heap region may be NULL
2705   // (when the collection set is empty).
2706   assert(result == NULL || result->in_collection_set(), "sanity");
2707   assert(_worker_cset_start_region_time_stamp[worker_i] != gc_time_stamp,
2708          "should be updated only once per pause");
2709   _worker_cset_start_region[worker_i] = result;
2710   OrderAccess::storestore();
2711   _worker_cset_start_region_time_stamp[worker_i] = gc_time_stamp;
2712   return result;
2713 }
2714 
2715 void G1CollectedHeap::collection_set_iterate(HeapRegionClosure* cl) {
2716   HeapRegion* r = g1_policy()->collection_set();
2717   while (r != NULL) {
2718     HeapRegion* next = r->next_in_collection_set();
2719     if (cl->doHeapRegion(r)) {
2720       cl->incomplete();
2721       return;
2722     }
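
The comment above states that, with n collection set regions and p threads, thread t starts its collection set iteration at region floor((t * n) / p); the integer division in (cs_size * worker_i) / active_workers supplies the floor. A small self-contained check of how that splits the regions into contiguous, nearly equal chunks (n = 10 and p = 4 are arbitrary example values):

#include <cstdio>

int main() {
  const unsigned cs_size        = 10;   // n collection set regions
  const unsigned active_workers = 4;    // p threads

  // Worker t starts at index floor((t * n) / p); worker t+1's start index
  // bounds worker t's chunk, so the chunks tile the whole collection set.
  for (unsigned t = 0; t < active_workers; ++t) {
    unsigned start = (cs_size * t) / active_workers;
    unsigned end   = (cs_size * (t + 1)) / active_workers;
    std::printf("worker %u: regions [%u, %u)\n", t, start, end);
  }
  // Output for n=10, p=4: worker 0 -> [0, 2), worker 1 -> [2, 5),
  //                       worker 2 -> [5, 7), worker 3 -> [7, 10)
  return 0;
}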


3356   st->cr();
3357   st->print_cr("Heap Regions: (Y=young(eden), SU=young(survivor), "
3358                "HS=humongous(starts), HC=humongous(continues), "
3359                "CS=collection set, F=free, TS=gc time stamp, "
3360                "PTAMS=previous top-at-mark-start, "
3361                "NTAMS=next top-at-mark-start)");
3362   PrintRegionClosure blk(st);
3363   heap_region_iterate(&blk);
3364 }
3365 
3366 void G1CollectedHeap::print_on_error(outputStream* st) const {
3367   this->CollectedHeap::print_on_error(st);
3368 
3369   if (_cm != NULL) {
3370     st->cr();
3371     _cm->print_on_error(st);
3372   }
3373 }
3374 
3375 void G1CollectedHeap::print_gc_threads_on(outputStream* st) const {
3376   if (G1CollectedHeap::use_parallel_gc_threads()) {
3377     workers()->print_worker_threads_on(st);
3378   }
3379   _cmThread->print_on(st);
3380   st->cr();
3381   _cm->print_worker_threads_on(st);
3382   _cg1r->print_worker_threads_on(st);
3383   if (G1StringDedup::is_enabled()) {
3384     G1StringDedup::print_worker_threads_on(st);
3385   }
3386 }
3387 
3388 void G1CollectedHeap::gc_threads_do(ThreadClosure* tc) const {
3389   if (G1CollectedHeap::use_parallel_gc_threads()) {
3390     workers()->threads_do(tc);
3391   }
3392   tc->do_thread(_cmThread);
3393   _cg1r->threads_do(tc);
3394   if (G1StringDedup::is_enabled()) {
3395     G1StringDedup::threads_do(tc);
3396   }
3397 }
3398 
3399 void G1CollectedHeap::print_tracing_info() const {
3400   // We'll overload this to mean "trace GC pause statistics."
3401   if (TraceYoungGenTime || TraceOldGenTime) {
3402     // The "G1CollectorPolicy" is keeping track of these stats, so delegate
3403     // to that.
3404     g1_policy()->print_tracing_info();
3405   }
3406   if (G1SummarizeRSetStats) {
3407     g1_rem_set()->print_summary_info();
3408   }
3409   if (G1SummarizeConcMark) {
3410     concurrent_mark()->print_summary_info();
3411   }


3666     guarantee(hr->rem_set()->verify_ready_for_par_iteration(), "verification");
3667 
3668     // Here's a good place to add any other checks we'd like to
3669     // perform on CSet regions.
3670     return false;
3671   }
3672 };
3673 #endif // ASSERT
3674 
3675 #if TASKQUEUE_STATS
3676 void G1CollectedHeap::print_taskqueue_stats_hdr(outputStream* const st) {
3677   st->print_raw_cr("GC Task Stats");
3678   st->print_raw("thr "); TaskQueueStats::print_header(1, st); st->cr();
3679   st->print_raw("--- "); TaskQueueStats::print_header(2, st); st->cr();
3680 }
3681 
3682 void G1CollectedHeap::print_taskqueue_stats(outputStream* const st) const {
3683   print_taskqueue_stats_hdr(st);
3684 
3685   TaskQueueStats totals;
3686   const int n = workers() != NULL ? workers()->total_workers() : 1;
3687   for (int i = 0; i < n; ++i) {
3688     st->print("%3d ", i); task_queue(i)->stats.print(st); st->cr();
3689     totals += task_queue(i)->stats;
3690   }
3691   st->print_raw("tot "); totals.print(st); st->cr();
3692 
3693   DEBUG_ONLY(totals.verify());
3694 }
3695 
3696 void G1CollectedHeap::reset_taskqueue_stats() {
3697   const int n = workers() != NULL ? workers()->total_workers() : 1;
3698   for (int i = 0; i < n; ++i) {
3699     task_queue(i)->stats.reset();
3700   }
3701 }
3702 #endif // TASKQUEUE_STATS
3703 
3704 void G1CollectedHeap::log_gc_header() {
3705   if (!G1Log::fine()) {
3706     return;
3707   }
3708 
3709   gclog_or_tty->gclog_stamp(_gc_tracer_stw->gc_id());
3710 
3711   GCCauseString gc_cause_str = GCCauseString("GC pause", gc_cause())
3712     .append(g1_policy()->gcs_are_young() ? "(young)" : "(mixed)")
3713     .append(g1_policy()->during_initial_mark_pause() ? " (initial-mark)" : "");
3714 
3715   gclog_or_tty->print("[%s", (const char*)gc_cause_str);
3716 }
3717 


3775   // Record whether this pause is an initial mark. When the current
3776   // thread has completed its logging output and it's safe to signal
3777   // the CM thread, the flag's value in the policy has been reset.
3778   bool should_start_conc_mark = g1_policy()->during_initial_mark_pause();
3779 
3780   // Inner scope for scope based logging, timers, and stats collection
3781   {
3782     EvacuationInfo evacuation_info;
3783 
3784     if (g1_policy()->during_initial_mark_pause()) {
3785       // We are about to start a marking cycle, so we increment the
3786       // full collection counter.
3787       increment_old_marking_cycles_started();
3788       register_concurrent_cycle_start(_gc_timer_stw->gc_start());
3789     }
3790 
3791     _gc_tracer_stw->report_yc_type(yc_type());
3792 
3793     TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
3794 
3795     int active_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
3796                                 workers()->active_workers() : 1);
3797     double pause_start_sec = os::elapsedTime();
3798     g1_policy()->phase_times()->note_gc_start(active_workers);
3799     log_gc_header();
3800 
3801     TraceCollectorStats tcs(g1mm()->incremental_collection_counters());
3802     TraceMemoryManagerStats tms(false /* fullGC */, gc_cause());
3803 
3804     // If the secondary_free_list is not empty, append it to the
3805     // free_list. No need to wait for the cleanup operation to finish;
3806     // the region allocation code will check the secondary_free_list
3807     // and wait if necessary. If the G1StressConcRegionFreeing flag is
3808     // set, skip this step so that the region allocation code has to
3809     // get entries from the secondary_free_list.
3810     if (!G1StressConcRegionFreeing) {
3811       append_secondary_free_list_if_not_empty_with_lock();
3812     }
3813 
3814     assert(check_young_list_well_formed(), "young list should be well formed");
3815 
3816     // Don't dynamically change the number of GC threads this early.  A value of


4770 
4771   g1_rem_set()->oops_into_collection_set_do(scan_rs, &scavenge_cs_nmethods, worker_i);
4772 
4773   _process_strong_tasks->all_tasks_completed();
4774 }
4775 
4776 class G1StringSymbolTableUnlinkTask : public AbstractGangTask {
4777 private:
4778   BoolObjectClosure* _is_alive;
4779   int _initial_string_table_size;
4780   int _initial_symbol_table_size;
4781 
4782   bool  _process_strings;
4783   int _strings_processed;
4784   int _strings_removed;
4785 
4786   bool  _process_symbols;
4787   int _symbols_processed;
4788   int _symbols_removed;
4789 
4790   bool _do_in_parallel;
4791 public:
4792   G1StringSymbolTableUnlinkTask(BoolObjectClosure* is_alive, bool process_strings, bool process_symbols) :
4793     AbstractGangTask("String/Symbol Unlinking"),
4794     _is_alive(is_alive),
4795     _do_in_parallel(G1CollectedHeap::use_parallel_gc_threads()),
4796     _process_strings(process_strings), _strings_processed(0), _strings_removed(0),
4797     _process_symbols(process_symbols), _symbols_processed(0), _symbols_removed(0) {
4798 
4799     _initial_string_table_size = StringTable::the_table()->table_size();
4800     _initial_symbol_table_size = SymbolTable::the_table()->table_size();
4801     if (process_strings) {
4802       StringTable::clear_parallel_claimed_index();
4803     }
4804     if (process_symbols) {
4805       SymbolTable::clear_parallel_claimed_index();
4806     }
4807   }
4808 
4809   ~G1StringSymbolTableUnlinkTask() {
4810     guarantee(!_process_strings || !_do_in_parallel || StringTable::parallel_claimed_index() >= _initial_string_table_size,
4811               err_msg("claim value %d after unlink less than initial string table size %d",
4812                       StringTable::parallel_claimed_index(), _initial_string_table_size));
4813     guarantee(!_process_symbols || !_do_in_parallel || SymbolTable::parallel_claimed_index() >= _initial_symbol_table_size,
4814               err_msg("claim value %d after unlink less than initial symbol table size %d",
4815                       SymbolTable::parallel_claimed_index(), _initial_symbol_table_size));
4816 
4817     if (G1TraceStringSymbolTableScrubbing) {
4818       gclog_or_tty->print_cr("Cleaned string and symbol table, "
4819                              "strings: "SIZE_FORMAT" processed, "SIZE_FORMAT" removed, "
4820                              "symbols: "SIZE_FORMAT" processed, "SIZE_FORMAT" removed",
4821                              strings_processed(), strings_removed(),
4822                              symbols_processed(), symbols_removed());
4823     }
4824   }
4825 
4826   void work(uint worker_id) {
4827     if (_do_in_parallel) {
4828       int strings_processed = 0;
4829       int strings_removed = 0;
4830       int symbols_processed = 0;
4831       int symbols_removed = 0;
4832       if (_process_strings) {
4833         StringTable::possibly_parallel_unlink(_is_alive, &strings_processed, &strings_removed);
4834         Atomic::add(strings_processed, &_strings_processed);
4835         Atomic::add(strings_removed, &_strings_removed);
4836       }
4837       if (_process_symbols) {
4838         SymbolTable::possibly_parallel_unlink(&symbols_processed, &symbols_removed);
4839         Atomic::add(symbols_processed, &_symbols_processed);
4840         Atomic::add(symbols_removed, &_symbols_removed);
4841       }
4842     } else {
4843       if (_process_strings) {
4844         StringTable::unlink(_is_alive, &_strings_processed, &_strings_removed);
4845       }
4846       if (_process_symbols) {
4847         SymbolTable::unlink(&_symbols_processed, &_symbols_removed);
4848       }
4849     }
4850   }
4851 
4852   size_t strings_processed() const { return (size_t)_strings_processed; }
4853   size_t strings_removed()   const { return (size_t)_strings_removed; }
4854 
4855   size_t symbols_processed() const { return (size_t)_symbols_processed; }
4856   size_t symbols_removed()   const { return (size_t)_symbols_removed; }
4857 };
4858 
4859 class G1CodeCacheUnloadingTask VALUE_OBJ_CLASS_SPEC {
4860 private:
4861   static Monitor* _lock;
4862 
4863   BoolObjectClosure* const _is_alive;
4864   const bool               _unloading_occurred;
4865   const uint               _num_workers;
4866 
4867   // Variables used to claim nmethods.
4868   nmethod* _first_nmethod;
4869   volatile nmethod* _claimed_nmethod;
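
In G1StringSymbolTableUnlinkTask::work() above, each worker claims table buckets through the parallel claimed index, counts its own work in locals, and publishes the totals once with Atomic::add; the destructor's guarantee then checks that the claimed index reached the initial table size, i.e. that every bucket was claimed by some worker. A standalone sketch of that claim-then-accumulate pattern, using C++11 std::atomic and std::thread in place of HotSpot's Atomic and work gang (the table size and chunk size are made-up illustration values):

#include <algorithm>
#include <atomic>
#include <cstdio>
#include <thread>
#include <vector>

static const int kTableSize = 1000;           // stand-in for table_size()
static const int kChunk     = 64;             // buckets claimed per grab

static std::atomic<int> claimed_index(0);     // next unclaimed bucket
static std::atomic<int> total_processed(0);   // shared total, like _strings_processed

// Each worker repeatedly claims a chunk of buckets, processes it using a
// thread-local counter, and adds the local count to the shared total once.
static void worker() {
  int local_processed = 0;
  for (;;) {
    int start = claimed_index.fetch_add(kChunk);
    if (start >= kTableSize) break;           // nothing left to claim
    int end = std::min(start + kChunk, kTableSize);
    local_processed += end - start;           // "unlink" the claimed buckets
  }
  total_processed.fetch_add(local_processed);
}

int main() {
  std::vector<std::thread> gang;
  for (int i = 0; i < 4; ++i) gang.emplace_back(worker);
  for (std::thread& t : gang) t.join();

  // Mirrors the destructor guarantee above: the claim index must have passed
  // the table size, so every bucket was claimed by exactly one worker.
  std::printf("claimed index %d (>= %d), processed %d buckets\n",
              claimed_index.load(), kTableSize, total_processed.load());
  return 0;
}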


5110     // Clean the Strings and Symbols.
5111     _string_symbol_task.work(worker_id);
5112 
5113     // Wait for all workers to finish the first code cache cleaning pass.
5114     _code_cache_task.barrier_wait(worker_id);
5115 
5116     // Do the second code cache cleaning work, which relies on
5117     // the liveness information gathered during the first pass.
5118     _code_cache_task.work_second_pass(worker_id);
5119 
5120     // Clean all klasses that were not unloaded.
5121     _klass_cleaning_task.work();
5122   }
5123 };
5124 
5125 
5126 void G1CollectedHeap::parallel_cleaning(BoolObjectClosure* is_alive,
5127                                         bool process_strings,
5128                                         bool process_symbols,
5129                                         bool class_unloading_occurred) {
5130   uint n_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
5131                     workers()->active_workers() : 1);
5132 
5133   G1ParallelCleaningTask g1_unlink_task(is_alive, process_strings, process_symbols,
5134                                         n_workers, class_unloading_occurred);
5135   if (G1CollectedHeap::use_parallel_gc_threads()) {
5136     set_par_threads(n_workers);
5137     workers()->run_task(&g1_unlink_task);
5138     set_par_threads(0);
5139   } else {
5140     g1_unlink_task.work(0);
5141   }
5142 }
5143 
5144 void G1CollectedHeap::unlink_string_and_symbol_table(BoolObjectClosure* is_alive,
5145                                                      bool process_strings, bool process_symbols) {
5146   {
5147     uint n_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
5148                      _g1h->workers()->active_workers() : 1);
5149     G1StringSymbolTableUnlinkTask g1_unlink_task(is_alive, process_strings, process_symbols);
5150     if (G1CollectedHeap::use_parallel_gc_threads()) {
5151       set_par_threads(n_workers);
5152       workers()->run_task(&g1_unlink_task);
5153       set_par_threads(0);
5154     } else {
5155       g1_unlink_task.work(0);
5156     }
5157   }
5158 
5159   if (G1StringDedup::is_enabled()) {
5160     G1StringDedup::unlink(is_alive);
5161   }
5162 }
5163 
5164 class G1RedirtyLoggedCardsTask : public AbstractGangTask {
5165  private:
5166   DirtyCardQueueSet* _queue;
5167  public:
5168   G1RedirtyLoggedCardsTask(DirtyCardQueueSet* queue) : AbstractGangTask("Redirty Cards"), _queue(queue) { }
5169 
5170   virtual void work(uint worker_id) {
5171     double start_time = os::elapsedTime();
5172 
5173     RedirtyLoggedCardTableEntryClosure cl;
5174     if (G1CollectedHeap::heap()->use_parallel_gc_threads()) {
5175       _queue->par_apply_closure_to_all_completed_buffers(&cl);
5176     } else {
5177       _queue->apply_closure_to_all_completed_buffers(&cl);
5178     }
5179 
5180     G1GCPhaseTimes* timer = G1CollectedHeap::heap()->g1_policy()->phase_times();
5181     timer->record_redirty_logged_cards_time_ms(worker_id, (os::elapsedTime() - start_time) * 1000.0);
5182     timer->record_redirty_logged_cards_processed_cards(worker_id, cl.num_processed());
5183   }
5184 };
5185 
5186 void G1CollectedHeap::redirty_logged_cards() {
5187   double redirty_logged_cards_start = os::elapsedTime();
5188 
5189   uint n_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
5190                    _g1h->workers()->active_workers() : 1);
5191 
5192   G1RedirtyLoggedCardsTask redirty_task(&dirty_card_queue_set());
5193   dirty_card_queue_set().reset_for_par_iteration();
5194   if (use_parallel_gc_threads()) {
5195     set_par_threads(n_workers);
5196     workers()->run_task(&redirty_task);
5197     set_par_threads(0);
5198   } else {
5199     redirty_task.work(0);
5200   }
5201 
5202   DirtyCardQueueSet& dcq = JavaThread::dirty_card_queue_set();
5203   dcq.merge_bufferlists(&dirty_card_queue_set());
5204   assert(dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed");
5205 
5206   g1_policy()->phase_times()->record_redirty_logged_cards_time_ms((os::elapsedTime() - redirty_logged_cards_start) * 1000.0);
5207 }
5208 
5209 // Weak Reference Processing support
5210 
5211 // An always "is_alive" closure that is used to preserve referents.
5212 // If the object is non-null then it's alive.  Used in the preservation
5213 // of referent objects that are pointed to by reference objects
5214 // discovered by the CM ref processor.
5215 class G1AlwaysAliveClosure: public BoolObjectClosure {
5216   G1CollectedHeap* _g1;
5217 public:
5218   G1AlwaysAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
5219   bool do_object_b(oop p) {
5220     if (p != NULL) {


5570   // by the CM ref processor should have already been copied (either by
5571   // applying the external root copy closure to the discovered lists, or
5572   // by following an RSet entry).
5573   //
5574   // But some of the referents that these reference objects point to, and
5575   // that are in the collection set, may not have been copied: the STW ref
5576   // processor would have seen that the reference object had already
5577   // been 'discovered' and would have skipped discovering the reference,
5578   // but would not have treated the reference object as a regular oop.
5579   // As a result the copy closure would not have been applied to the
5580   // referent object.
5581   //
5582   // We need to explicitly copy these referent objects - the references
5583   // will be processed at the end of remarking.
5584   //
5585   // We also need to do this copying before we process the reference
5586   // objects discovered by the STW ref processor in case one of these
5587   // referents points to another object which is also referenced by an
5588   // object discovered by the STW ref processor.
5589 
5590   assert(!G1CollectedHeap::use_parallel_gc_threads() ||
5591            no_of_gc_workers == workers()->active_workers(),
5592            "Need to reset active GC workers");
5593 
5594   set_par_threads(no_of_gc_workers);
5595   G1ParPreserveCMReferentsTask keep_cm_referents(this,
5596                                                  no_of_gc_workers,
5597                                                  _task_queues);
5598 
5599   if (G1CollectedHeap::use_parallel_gc_threads()) {
5600     workers()->run_task(&keep_cm_referents);
5601   } else {
5602     keep_cm_referents.work(0);
5603   }
5604 
5605   set_par_threads(0);
5606 
5607   // Closure to test whether a referent is alive.
5608   G1STWIsAliveClosure is_alive(this);
5609 
5610   // Even when parallel reference processing is enabled, the processing
5611   // of JNI refs is serial and is performed by the current thread
5612   // rather than by a worker. The following PSS will be used for processing
5613   // JNI refs.
5614 
5615   // Use only a single queue for this PSS.
5616   G1ParScanThreadState            pss(this, 0, NULL);
5617 
5618   // We do not embed a reference processor in the copying/scanning
5619   // closures while we're actually processing the discovered
5620   // reference objects.
5621   G1ParScanHeapEvacFailureClosure evac_failure_cl(this, &pss, NULL);
5622 
5623   pss.set_evac_failure_closure(&evac_failure_cl);


5710 
5711   double ref_enq_time = os::elapsedTime() - ref_enq_start;
5712   g1_policy()->phase_times()->record_ref_enq_time(ref_enq_time * 1000.0);
5713 }
5714 
5715 void G1CollectedHeap::evacuate_collection_set(EvacuationInfo& evacuation_info) {
5716   _expand_heap_after_alloc_failure = true;
5717   _evacuation_failed = false;
5718 
5719   // Should G1EvacuationFailureALot be in effect for this GC?
5720   NOT_PRODUCT(set_evacuation_failure_alot_for_current_gc();)
5721 
5722   g1_rem_set()->prepare_for_oops_into_collection_set_do();
5723 
5724   // Disable the hot card cache.
5725   G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache();
5726   hot_card_cache->reset_hot_cache_claimed_index();
5727   hot_card_cache->set_use_cache(false);
5728 
5729   uint n_workers;
5730   if (G1CollectedHeap::use_parallel_gc_threads()) {
5731     n_workers =
5732       AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
5733                                      workers()->active_workers(),
5734                                      Threads::number_of_non_daemon_threads());
5735     assert(UseDynamicNumberOfGCThreads ||
5736            n_workers == workers()->total_workers(),
5737            "If not dynamic should be using all the workers");
5738     workers()->set_active_workers(n_workers);
5739     set_par_threads(n_workers);
5740   } else {
5741     assert(n_par_threads() == 0,
5742            "Should be the original non-parallel value");
5743     n_workers = 1;
5744   }
5745 
5746   G1ParTask g1_par_task(this, _task_queues);
5747 
5748   init_for_evac_failure(NULL);
5749 
5750   rem_set()->prepare_for_younger_refs_iterate(true);
5751 
5752   assert(dirty_card_queue_set().completed_buffers_num() == 0, "Should be empty");
5753   double start_par_time_sec = os::elapsedTime();
5754   double end_par_time_sec;
5755 
5756   {
5757     StrongRootsScope srs(this);
5758     // InitialMark needs claim bits to keep track of the marked-through CLDs.
5759     if (g1_policy()->during_initial_mark_pause()) {
5760       ClassLoaderDataGraph::clear_claimed_marks();
5761     }
5762 
5763     if (G1CollectedHeap::use_parallel_gc_threads()) {
5764       // The individual threads will set their evac-failure closures.
5765       if (ParallelGCVerbose) G1ParScanThreadState::print_termination_stats_hdr();
5766       // These tasks use SharedHeap::_process_strong_tasks
5767       assert(UseDynamicNumberOfGCThreads ||
5768              workers()->active_workers() == workers()->total_workers(),
5769              "If not dynamic should be using all the workers");
5770       workers()->run_task(&g1_par_task);
5771     } else {
5772       g1_par_task.set_for_termination(n_workers);
5773       g1_par_task.work(0);
5774     }
5775     end_par_time_sec = os::elapsedTime();
5776 
5777     // Closing the inner scope will execute the destructor
5778     // for the StrongRootsScope object. We record the current
5779     // elapsed time before closing the scope so that time
5780     // taken for the SRS destructor is NOT included in the
5781     // reported parallel time.
5782   }
5783 
5784   double par_time_ms = (end_par_time_sec - start_par_time_sec) * 1000.0;
5785   g1_policy()->phase_times()->record_par_time(par_time_ms);
5786 
5787   double code_root_fixup_time_ms =
5788         (os::elapsedTime() - end_par_time_sec) * 1000.0;
5789   g1_policy()->phase_times()->record_code_root_fixup_time(code_root_fixup_time_ms);
5790 
5791   set_par_threads(0);
5792 
5793   // Process any discovered reference objects - we have
5794   // to do this _before_ we retire the GC alloc regions
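
The comment above explains why end_par_time_sec is recorded inside the inner scope: the StrongRootsScope destructor runs when the scope closes, and its cost must not be charged to the reported parallel time (in the code shown it ends up in code_root_fixup_time_ms instead). A small standalone illustration of that measurement pattern, with std::chrono standing in for os::elapsedTime() and a dummy type standing in for StrongRootsScope:

#include <chrono>
#include <cstdio>

// Stand-in for StrongRootsScope: its destructor does cleanup work that should
// not be counted as parallel time.
struct ScopeWithCleanupInDestructor {
  ~ScopeWithCleanupInDestructor() {
    // ... cleanup that belongs to the post-parallel phase ...
  }
};

int main() {
  using Clock = std::chrono::steady_clock;
  Clock::time_point start_par = Clock::now();
  Clock::time_point end_par;
  {
    ScopeWithCleanupInDestructor srs;
    // ... the parallel work (workers()->run_task(...)) happens here ...

    // Record the end *inside* the scope, as the hunk above does with
    // end_par_time_sec, so the destructor's cost is excluded from par_time_ms.
    end_par = Clock::now();
  }
  double par_time_ms =
      std::chrono::duration<double, std::milli>(end_par - start_par).count();
  std::printf("reported parallel time: %.3f ms\n", par_time_ms);
  return 0;
}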


6060   }
6061 };
6062 
6063 void G1CollectedHeap::check_bitmaps(const char* caller) {
6064   if (!G1VerifyBitmaps) return;
6065 
6066   G1VerifyBitmapClosure cl(caller, this);
6067   heap_region_iterate(&cl);
6068   guarantee(!cl.failures(), "bitmap verification");
6069 }
6070 #endif // PRODUCT
6071 
6072 void G1CollectedHeap::cleanUpCardTable() {
6073   G1SATBCardTableModRefBS* ct_bs = g1_barrier_set();
6074   double start = os::elapsedTime();
6075 
6076   {
6077     // Iterate over the dirty cards region list.
6078     G1ParCleanupCTTask cleanup_task(ct_bs, this);
6079 
6080     if (G1CollectedHeap::use_parallel_gc_threads()) {
6081       set_par_threads();
6082       workers()->run_task(&cleanup_task);
6083       set_par_threads(0);
6084     } else {
6085       while (_dirty_cards_region_list) {
6086         HeapRegion* r = _dirty_cards_region_list;
6087         cleanup_task.clear_cards(r);
6088         _dirty_cards_region_list = r->get_next_dirty_cards_region();
6089         if (_dirty_cards_region_list == r) {
6090           // The last region.
6091           _dirty_cards_region_list = NULL;
6092         }
6093         r->set_next_dirty_cards_region(NULL);
6094       }
6095     }
6096 #ifndef PRODUCT
6097     if (G1VerifyCTCleanup || VerifyAfterGC) {
6098       G1VerifyCardTableCleanup cleanup_verifier(this, ct_bs);
6099       heap_region_iterate(&cleanup_verifier);
6100     }
6101 #endif
6102   }
6103 
6104   double elapsed = os::elapsedTime() - start;
6105   g1_policy()->phase_times()->record_clear_ct_time(elapsed * 1000.0);
6106 }
6107 
6108 void G1CollectedHeap::free_collection_set(HeapRegion* cs_head, EvacuationInfo& evacuation_info) {
6109   size_t pre_used = 0;
6110   FreeRegionList local_free_list("Local List for CSet Freeing");
6111 
6112   double young_time_ms     = 0.0;
6113   double non_young_time_ms = 0.0;
6114 
6115   // Since the collection set is a superset of the young list,
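
The serial fallback in cleanUpCardTable() in the hunk above pops regions off _dirty_cards_region_list; the loop treats a region whose next pointer is the region itself as the tail of the list, and clears each region's link as it is removed. A minimal standalone sketch of that self-terminated list walk (the Region struct is an illustrative stand-in for HeapRegion, not the real type):

#include <cstdio>

// Illustrative stand-in for HeapRegion's dirty-cards link.
struct Region {
  int     id;
  Region* next_dirty;
};

int main() {
  Region a = {0, nullptr}, b = {1, nullptr}, c = {2, nullptr};
  a.next_dirty = &b;
  b.next_dirty = &c;
  c.next_dirty = &c;                        // the last region links to itself
  Region* dirty_cards_region_list = &a;

  // Same shape as the serial loop above: detach the head, detect the tail via
  // the self-link, and clear the popped region's link.
  while (dirty_cards_region_list != nullptr) {
    Region* r = dirty_cards_region_list;
    std::printf("clearing cards for region %d\n", r->id);
    dirty_cards_region_list = r->next_dirty;
    if (dirty_cards_region_list == r) {
      dirty_cards_region_list = nullptr;    // r was the last region
    }
    r->next_dirty = nullptr;                // r is no longer on the list
  }
  return 0;
}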


6615   return NULL;
6616 }
6617 
6618 void G1CollectedHeap::retire_mutator_alloc_region(HeapRegion* alloc_region,
6619                                                   size_t allocated_bytes) {
6620   assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
6621   assert(alloc_region->is_eden(), "all mutator alloc regions should be eden");
6622 
6623   g1_policy()->add_region_to_incremental_cset_lhs(alloc_region);
6624   _allocator->increase_used(allocated_bytes);
6625   _hr_printer.retire(alloc_region);
6626   // We update the eden sizes here, when the region is retired,
6627   // instead of when it's allocated, since this is the point that its
6628   // used space has been recorded in _summary_bytes_used.
6629   g1mm()->update_eden_size();
6630 }
6631 
6632 void G1CollectedHeap::set_par_threads() {
6633   // Don't change the number of workers.  Use the value previously set
6634   // in the workgroup.
6635   assert(G1CollectedHeap::use_parallel_gc_threads(), "shouldn't be here otherwise");
6636   uint n_workers = workers()->active_workers();
6637   assert(UseDynamicNumberOfGCThreads ||
6638            n_workers == workers()->total_workers(),
6639       "Otherwise should be using the total number of workers");
6640   if (n_workers == 0) {
6641     assert(false, "Should have been set in prior evacuation pause.");
6642     n_workers = ParallelGCThreads;
6643     workers()->set_active_workers(n_workers);
6644   }
6645   set_par_threads(n_workers);
6646 }
6647 
6648 // Methods for the GC alloc regions
6649 
6650 HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size,
6651                                                  uint count,
6652                                                  GCAllocPurpose ap) {
6653   assert(FreeList_lock->owned_by_self(), "pre-condition");
6654 
6655   if (count < g1_policy()->max_regions(ap)) {




1421 
1422       // Resize the heap if necessary.
1423       resize_if_necessary_after_full_collection(explicit_gc ? 0 : word_size);
1424 
1425       if (_hr_printer.is_active()) {
1426         // We should do this after we potentially resize the heap so
1427         // that all the COMMIT / UNCOMMIT events are generated before
1428         // the end GC event.
1429 
1430         print_hrm_post_compaction();
1431         _hr_printer.end_gc(true /* full */, (size_t) total_collections());
1432       }
1433 
1434       G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache();
1435       if (hot_card_cache->use_cache()) {
1436         hot_card_cache->reset_card_counts();
1437         hot_card_cache->reset_hot_cache();
1438       }
1439 
1440       // Rebuild remembered sets of all regions.

1441       uint n_workers =
1442         AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
1443                                                 workers()->active_workers(),
1444                                                 Threads::number_of_non_daemon_threads());
1445       assert(UseDynamicNumberOfGCThreads ||
1446              n_workers == workers()->total_workers(),
1447              "If not dynamic should be using all the workers");
1448       workers()->set_active_workers(n_workers);
1449       // Set parallel threads in the heap (_n_par_threads) only
1450       // before a parallel phase and always reset it to 0 after
1451       // the phase so that the number of parallel threads does
1452       // not get carried forward to a serial phase where there
1453       // may be code that is "possibly_parallel".
1454       set_par_threads(n_workers);
1455 
1456       ParRebuildRSTask rebuild_rs_task(this);
1457       assert(UseDynamicNumberOfGCThreads ||
1458              workers()->active_workers() == workers()->total_workers(),
1459              "Unless dynamic should use total workers");
1460       // Use the most recent number of active workers
1461       assert(workers()->active_workers() > 0,
1462              "Active workers not properly set");
1463       set_par_threads(workers()->active_workers());
1464       workers()->run_task(&rebuild_rs_task);
1465       set_par_threads(0);




1466 
1467       // Rebuild the strong code root lists for each region
1468       rebuild_strong_code_roots();
1469 
1470       if (true) { // FIXME
1471         MetaspaceGC::compute_new_size();
1472       }
1473 
1474 #ifdef TRACESPINNING
1475       ParallelTaskTerminator::print_termination_counts();
1476 #endif
1477 
1478       // Discard all rset updates
1479       JavaThread::dirty_card_queue_set().abandon_logs();
1480       assert(dirty_card_queue_set().completed_buffers_num() == 0, "DCQS should be empty");
1481 
1482       _young_list->reset_sampled_info();
1483       // At this point there should be no regions in the
1484       // entire heap tagged as young.
1485       assert(check_young_list_empty(true /* check_heap */),
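
The assert above documents the only property of AdaptiveSizePolicy::calc_active_workers that this code relies on: with UseDynamicNumberOfGCThreads off it returns the gang's total worker count, and with dynamic sizing on it may return fewer, guided by the number of non-daemon (application) threads. A deliberately rough standalone sketch of that shape; the scaling rule below is an illustrative placeholder, not HotSpot's actual policy:

#include <algorithm>
#include <cstdio>

// Illustrative-only stand-in for AdaptiveSizePolicy::calc_active_workers.
// The real policy is more involved; the asserts in this file only depend on
// the non-dynamic case returning total_workers.
static unsigned calc_active_workers_sketch(unsigned total_workers,
                                           unsigned /* prev_active_workers */,
                                           unsigned non_daemon_threads,
                                           bool     use_dynamic_gc_threads) {
  if (!use_dynamic_gc_threads) {
    return total_workers;                  // "should be using all the workers"
  }
  // Placeholder scaling: at least one worker, at most the full gang.
  return std::max(1u, std::min(total_workers, non_daemon_threads));
}

int main() {
  std::printf("static sizing: %u workers, dynamic sizing: %u workers\n",
              calc_active_workers_sketch(8, 8, 3, false),
              calc_active_workers_sketch(8, 8, 3, true));
  return 0;
}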


2656     // during the current pause - so it's valid.
2657     // Note: the cached starting heap region may be NULL
2658     // (when the collection set is empty).
2659     result = _worker_cset_start_region[worker_i];
2660     assert(result == NULL || result->in_collection_set(), "sanity");
2661     return result;
2662   }
2663 
2664   // The cached entry was not valid so let's calculate
2665   // a suitable starting heap region for this worker.
2666 
2667   // We want the parallel threads to start their collection
2668   // set iteration at different collection set regions to
2669   // avoid contention.
2670   // If we have:
2671   //          n collection set regions
2672   //          p threads
2673   // Then thread t will start at region floor ((t * n) / p)
2674 
2675   result = g1_policy()->collection_set();

2676   uint cs_size = g1_policy()->cset_region_length();
2677   uint active_workers = workers()->active_workers();
2678   assert(UseDynamicNumberOfGCThreads ||
2679            active_workers == workers()->total_workers(),
2680            "Unless dynamic should use total workers");
2681 
2682   uint end_ind   = (cs_size * worker_i) / active_workers;
2683   uint start_ind = 0;
2684 
2685   if (worker_i > 0 &&
2686       _worker_cset_start_region_time_stamp[worker_i - 1] == gc_time_stamp) {
2687     // Previous worker's starting region is valid
2688     // so let's iterate from there
2689     start_ind = (cs_size * (worker_i - 1)) / active_workers;
2690     result = _worker_cset_start_region[worker_i - 1];
2691   }
2692 
2693   for (uint i = start_ind; i < end_ind; i++) {
2694     result = result->next_in_collection_set();
2695   }

2696 
2697   // Note: the calculated starting heap region may be NULL
2698   // (when the collection set is empty).
2699   assert(result == NULL || result->in_collection_set(), "sanity");
2700   assert(_worker_cset_start_region_time_stamp[worker_i] != gc_time_stamp,
2701          "should be updated only once per pause");
2702   _worker_cset_start_region[worker_i] = result;
2703   OrderAccess::storestore();
2704   _worker_cset_start_region_time_stamp[worker_i] = gc_time_stamp;
2705   return result;
2706 }
2707 
2708 void G1CollectedHeap::collection_set_iterate(HeapRegionClosure* cl) {
2709   HeapRegion* r = g1_policy()->collection_set();
2710   while (r != NULL) {
2711     HeapRegion* next = r->next_in_collection_set();
2712     if (cl->doHeapRegion(r)) {
2713       cl->incomplete();
2714       return;
2715     }
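
At the end of start_cset_region_for_worker() above, the cached start region is written first, then OrderAccess::storestore(), then the per-worker time stamp; a thread that later sees the current time stamp is therefore guaranteed to also see the region that was cached for it (the time-stamp check happens at the top of the method). A standalone sketch of that publish-then-timestamp ordering, using C++11 release/acquire atomics in place of OrderAccess (the variable names mirror the originals, simplified to a single worker slot):

#include <atomic>
#include <cstdio>

// One worker's cached cset start region plus the time stamp that says whether
// the cache is valid for the current pause (simplified to plain ints).
static int                   worker_cset_start_region = -1;   // plain data
static std::atomic<unsigned> worker_cset_start_region_time_stamp(0);

// Publisher: write the cached region first, then publish the time stamp with
// release ordering. The release store plays the role of the storestore()
// barrier above: a reader that observes the new time stamp also observes the
// region written before it.
static void publish(int region, unsigned gc_time_stamp) {
  worker_cset_start_region = region;
  worker_cset_start_region_time_stamp.store(gc_time_stamp,
                                            std::memory_order_release);
}

// Reader: trust the cached region only if the time stamp matches the current
// pause, mirroring the check at the top of the original method.
static bool read_cached(unsigned gc_time_stamp, int* region_out) {
  if (worker_cset_start_region_time_stamp.load(std::memory_order_acquire)
      == gc_time_stamp) {
    *region_out = worker_cset_start_region;
    return true;
  }
  return false;
}

int main() {
  publish(7, 42);                        // cache region 7 for pause 42
  int region = -1;
  if (read_cached(42, &region)) {
    std::printf("cached start region: %d\n", region);
  }
  return 0;
}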


3349   st->cr();
3350   st->print_cr("Heap Regions: (Y=young(eden), SU=young(survivor), "
3351                "HS=humongous(starts), HC=humongous(continues), "
3352                "CS=collection set, F=free, TS=gc time stamp, "
3353                "PTAMS=previous top-at-mark-start, "
3354                "NTAMS=next top-at-mark-start)");
3355   PrintRegionClosure blk(st);
3356   heap_region_iterate(&blk);
3357 }
3358 
3359 void G1CollectedHeap::print_on_error(outputStream* st) const {
3360   this->CollectedHeap::print_on_error(st);
3361 
3362   if (_cm != NULL) {
3363     st->cr();
3364     _cm->print_on_error(st);
3365   }
3366 }
3367 
3368 void G1CollectedHeap::print_gc_threads_on(outputStream* st) const {

3369   workers()->print_worker_threads_on(st);

3370   _cmThread->print_on(st);
3371   st->cr();
3372   _cm->print_worker_threads_on(st);
3373   _cg1r->print_worker_threads_on(st);
3374   if (G1StringDedup::is_enabled()) {
3375     G1StringDedup::print_worker_threads_on(st);
3376   }
3377 }
3378 
3379 void G1CollectedHeap::gc_threads_do(ThreadClosure* tc) const {

3380   workers()->threads_do(tc);

3381   tc->do_thread(_cmThread);
3382   _cg1r->threads_do(tc);
3383   if (G1StringDedup::is_enabled()) {
3384     G1StringDedup::threads_do(tc);
3385   }
3386 }
3387 
3388 void G1CollectedHeap::print_tracing_info() const {
3389   // We'll overload this to mean "trace GC pause statistics."
3390   if (TraceYoungGenTime || TraceOldGenTime) {
3391     // The "G1CollectorPolicy" is keeping track of these stats, so delegate
3392     // to that.
3393     g1_policy()->print_tracing_info();
3394   }
3395   if (G1SummarizeRSetStats) {
3396     g1_rem_set()->print_summary_info();
3397   }
3398   if (G1SummarizeConcMark) {
3399     concurrent_mark()->print_summary_info();
3400   }


3655     guarantee(hr->rem_set()->verify_ready_for_par_iteration(), "verification");
3656 
3657     // Here's a good place to add any other checks we'd like to
3658     // perform on CSet regions.
3659     return false;
3660   }
3661 };
3662 #endif // ASSERT
3663 
3664 #if TASKQUEUE_STATS
3665 void G1CollectedHeap::print_taskqueue_stats_hdr(outputStream* const st) {
3666   st->print_raw_cr("GC Task Stats");
3667   st->print_raw("thr "); TaskQueueStats::print_header(1, st); st->cr();
3668   st->print_raw("--- "); TaskQueueStats::print_header(2, st); st->cr();
3669 }
3670 
3671 void G1CollectedHeap::print_taskqueue_stats(outputStream* const st) const {
3672   print_taskqueue_stats_hdr(st);
3673 
3674   TaskQueueStats totals;
3675   const int n = workers()->total_workers();
3676   for (int i = 0; i < n; ++i) {
3677     st->print("%3d ", i); task_queue(i)->stats.print(st); st->cr();
3678     totals += task_queue(i)->stats;
3679   }
3680   st->print_raw("tot "); totals.print(st); st->cr();
3681 
3682   DEBUG_ONLY(totals.verify());
3683 }
3684 
3685 void G1CollectedHeap::reset_taskqueue_stats() {
3686   const int n = workers()->total_workers();
3687   for (int i = 0; i < n; ++i) {
3688     task_queue(i)->stats.reset();
3689   }
3690 }
3691 #endif // TASKQUEUE_STATS
3692 
3693 void G1CollectedHeap::log_gc_header() {
3694   if (!G1Log::fine()) {
3695     return;
3696   }
3697 
3698   gclog_or_tty->gclog_stamp(_gc_tracer_stw->gc_id());
3699 
3700   GCCauseString gc_cause_str = GCCauseString("GC pause", gc_cause())
3701     .append(g1_policy()->gcs_are_young() ? "(young)" : "(mixed)")
3702     .append(g1_policy()->during_initial_mark_pause() ? " (initial-mark)" : "");
3703 
3704   gclog_or_tty->print("[%s", (const char*)gc_cause_str);
3705 }
3706 


3764   // Record whether this pause is an initial mark. When the current
3765   // thread has completed its logging output and it's safe to signal
3766   // the CM thread, the flag's value in the policy has been reset.
3767   bool should_start_conc_mark = g1_policy()->during_initial_mark_pause();
3768 
3769   // Inner scope for scope based logging, timers, and stats collection
3770   {
3771     EvacuationInfo evacuation_info;
3772 
3773     if (g1_policy()->during_initial_mark_pause()) {
3774       // We are about to start a marking cycle, so we increment the
3775       // full collection counter.
3776       increment_old_marking_cycles_started();
3777       register_concurrent_cycle_start(_gc_timer_stw->gc_start());
3778     }
3779 
3780     _gc_tracer_stw->report_yc_type(yc_type());
3781 
3782     TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
3783 
3784     int active_workers = workers()->active_workers();

3785     double pause_start_sec = os::elapsedTime();
3786     g1_policy()->phase_times()->note_gc_start(active_workers);
3787     log_gc_header();
3788 
3789     TraceCollectorStats tcs(g1mm()->incremental_collection_counters());
3790     TraceMemoryManagerStats tms(false /* fullGC */, gc_cause());
3791 
3792     // If the secondary_free_list is not empty, append it to the
3793     // free_list. No need to wait for the cleanup operation to finish;
3794     // the region allocation code will check the secondary_free_list
3795     // and wait if necessary. If the G1StressConcRegionFreeing flag is
3796     // set, skip this step so that the region allocation code has to
3797     // get entries from the secondary_free_list.
3798     if (!G1StressConcRegionFreeing) {
3799       append_secondary_free_list_if_not_empty_with_lock();
3800     }
3801 
3802     assert(check_young_list_well_formed(), "young list should be well formed");
3803 
3804     // Don't dynamically change the number of GC threads this early.  A value of


4758 
4759   g1_rem_set()->oops_into_collection_set_do(scan_rs, &scavenge_cs_nmethods, worker_i);
4760 
4761   _process_strong_tasks->all_tasks_completed();
4762 }
4763 
4764 class G1StringSymbolTableUnlinkTask : public AbstractGangTask {
4765 private:
4766   BoolObjectClosure* _is_alive;
4767   int _initial_string_table_size;
4768   int _initial_symbol_table_size;
4769 
4770   bool  _process_strings;
4771   int _strings_processed;
4772   int _strings_removed;
4773 
4774   bool  _process_symbols;
4775   int _symbols_processed;
4776   int _symbols_removed;
4777 

4778 public:
4779   G1StringSymbolTableUnlinkTask(BoolObjectClosure* is_alive, bool process_strings, bool process_symbols) :
4780     AbstractGangTask("String/Symbol Unlinking"),
4781     _is_alive(is_alive),

4782     _process_strings(process_strings), _strings_processed(0), _strings_removed(0),
4783     _process_symbols(process_symbols), _symbols_processed(0), _symbols_removed(0) {
4784 
4785     _initial_string_table_size = StringTable::the_table()->table_size();
4786     _initial_symbol_table_size = SymbolTable::the_table()->table_size();
4787     if (process_strings) {
4788       StringTable::clear_parallel_claimed_index();
4789     }
4790     if (process_symbols) {
4791       SymbolTable::clear_parallel_claimed_index();
4792     }
4793   }
4794 
4795   ~G1StringSymbolTableUnlinkTask() {
4796     guarantee(!_process_strings || StringTable::parallel_claimed_index() >= _initial_string_table_size,
4797               err_msg("claim value %d after unlink less than initial string table size %d",
4798                       StringTable::parallel_claimed_index(), _initial_string_table_size));
4799     guarantee(!_process_symbols || SymbolTable::parallel_claimed_index() >= _initial_symbol_table_size,
4800               err_msg("claim value %d after unlink less than initial symbol table size %d",
4801                       SymbolTable::parallel_claimed_index(), _initial_symbol_table_size));
4802 
4803     if (G1TraceStringSymbolTableScrubbing) {
4804       gclog_or_tty->print_cr("Cleaned string and symbol table, "
4805                              "strings: "SIZE_FORMAT" processed, "SIZE_FORMAT" removed, "
4806                              "symbols: "SIZE_FORMAT" processed, "SIZE_FORMAT" removed",
4807                              strings_processed(), strings_removed(),
4808                              symbols_processed(), symbols_removed());
4809     }
4810   }
4811 
4812   void work(uint worker_id) {

4813     int strings_processed = 0;
4814     int strings_removed = 0;
4815     int symbols_processed = 0;
4816     int symbols_removed = 0;
4817     if (_process_strings) {
4818       StringTable::possibly_parallel_unlink(_is_alive, &strings_processed, &strings_removed);
4819       Atomic::add(strings_processed, &_strings_processed);
4820       Atomic::add(strings_removed, &_strings_removed);
4821     }
4822     if (_process_symbols) {
4823       SymbolTable::possibly_parallel_unlink(&symbols_processed, &symbols_removed);
4824       Atomic::add(symbols_processed, &_symbols_processed);
4825       Atomic::add(symbols_removed, &_symbols_removed);
4826     }








4827   }
4828 
4829   size_t strings_processed() const { return (size_t)_strings_processed; }
4830   size_t strings_removed()   const { return (size_t)_strings_removed; }
4831 
4832   size_t symbols_processed() const { return (size_t)_symbols_processed; }
4833   size_t symbols_removed()   const { return (size_t)_symbols_removed; }
4834 };
4835 
4836 class G1CodeCacheUnloadingTask VALUE_OBJ_CLASS_SPEC {
4837 private:
4838   static Monitor* _lock;
4839 
4840   BoolObjectClosure* const _is_alive;
4841   const bool               _unloading_occurred;
4842   const uint               _num_workers;
4843 
4844   // Variables used to claim nmethods.
4845   nmethod* _first_nmethod;
4846   volatile nmethod* _claimed_nmethod;


5087     // Clean the Strings and Symbols.
5088     _string_symbol_task.work(worker_id);
5089 
5090     // Wait for all workers to finish the first code cache cleaning pass.
5091     _code_cache_task.barrier_wait(worker_id);
5092 
5093     // Do the second code cache cleaning work, which relies on
5094     // the liveness information gathered during the first pass.
5095     _code_cache_task.work_second_pass(worker_id);
5096 
5097     // Clean all klasses that were not unloaded.
5098     _klass_cleaning_task.work();
5099   }
5100 };
5101 
5102 
5103 void G1CollectedHeap::parallel_cleaning(BoolObjectClosure* is_alive,
5104                                         bool process_strings,
5105                                         bool process_symbols,
5106                                         bool class_unloading_occurred) {
5107   uint n_workers = workers()->active_workers();

5108 
5109   G1ParallelCleaningTask g1_unlink_task(is_alive, process_strings, process_symbols,
5110                                         n_workers, class_unloading_occurred);

5111   set_par_threads(n_workers);
5112   workers()->run_task(&g1_unlink_task);
5113   set_par_threads(0);



5114 }
5115 
5116 void G1CollectedHeap::unlink_string_and_symbol_table(BoolObjectClosure* is_alive,
5117                                                      bool process_strings, bool process_symbols) {
5118   {
5119     uint n_workers = _g1h->workers()->active_workers();

5120     G1StringSymbolTableUnlinkTask g1_unlink_task(is_alive, process_strings, process_symbols);

5121     set_par_threads(n_workers);
5122     workers()->run_task(&g1_unlink_task);
5123     set_par_threads(0);



5124   }
5125 
5126   if (G1StringDedup::is_enabled()) {
5127     G1StringDedup::unlink(is_alive);
5128   }
5129 }
5130 
5131 class G1RedirtyLoggedCardsTask : public AbstractGangTask {
5132  private:
5133   DirtyCardQueueSet* _queue;
5134  public:
5135   G1RedirtyLoggedCardsTask(DirtyCardQueueSet* queue) : AbstractGangTask("Redirty Cards"), _queue(queue) { }
5136 
5137   virtual void work(uint worker_id) {
5138     double start_time = os::elapsedTime();
5139 
5140     RedirtyLoggedCardTableEntryClosure cl;

5141     _queue->par_apply_closure_to_all_completed_buffers(&cl);



5142 
5143     G1GCPhaseTimes* timer = G1CollectedHeap::heap()->g1_policy()->phase_times();
5144     timer->record_redirty_logged_cards_time_ms(worker_id, (os::elapsedTime() - start_time) * 1000.0);
5145     timer->record_redirty_logged_cards_processed_cards(worker_id, cl.num_processed());
5146   }
5147 };
5148 
5149 void G1CollectedHeap::redirty_logged_cards() {
5150   double redirty_logged_cards_start = os::elapsedTime();
5151 
5152   uint n_workers = _g1h->workers()->active_workers();

5153 
5154   G1RedirtyLoggedCardsTask redirty_task(&dirty_card_queue_set());
5155   dirty_card_queue_set().reset_for_par_iteration();

5156   set_par_threads(n_workers);
5157   workers()->run_task(&redirty_task);
5158   set_par_threads(0);



5159 
5160   DirtyCardQueueSet& dcq = JavaThread::dirty_card_queue_set();
5161   dcq.merge_bufferlists(&dirty_card_queue_set());
5162   assert(dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed");
5163 
5164   g1_policy()->phase_times()->record_redirty_logged_cards_time_ms((os::elapsedTime() - redirty_logged_cards_start) * 1000.0);
5165 }
5166 
5167 // Weak Reference Processing support
5168 
5169 // An always "is_alive" closure that is used to preserve referents.
5170 // If the object is non-null then it's alive.  Used in the preservation
5171 // of referent objects that are pointed to by reference objects
5172 // discovered by the CM ref processor.
5173 class G1AlwaysAliveClosure: public BoolObjectClosure {
5174   G1CollectedHeap* _g1;
5175 public:
5176   G1AlwaysAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
5177   bool do_object_b(oop p) {
5178     if (p != NULL) {


5528   // by the CM ref processor should have already been copied (either by
5529   // applying the external root copy closure to the discovered lists, or
5530   // by following an RSet entry).
5531   //
5532   // But some of the referents that these reference objects point to, and
5533   // that are in the collection set, may not have been copied: the STW ref
5534   // processor would have seen that the reference object had already
5535   // been 'discovered' and would have skipped discovering the reference,
5536   // but would not have treated the reference object as a regular oop.
5537   // As a result the copy closure would not have been applied to the
5538   // referent object.
5539   //
5540   // We need to explicitly copy these referent objects - the references
5541   // will be processed at the end of remarking.
5542   //
5543   // We also need to do this copying before we process the reference
5544   // objects discovered by the STW ref processor in case one of these
5545   // referents points to another object which is also referenced by an
5546   // object discovered by the STW ref processor.
5547 
5548   assert(no_of_gc_workers == workers()->active_workers(), "Need to reset active GC workers");


5549 
5550   set_par_threads(no_of_gc_workers);
5551   G1ParPreserveCMReferentsTask keep_cm_referents(this,
5552                                                  no_of_gc_workers,
5553                                                  _task_queues);
5554 

5555   workers()->run_task(&keep_cm_referents);



5556 
5557   set_par_threads(0);
5558 
5559   // Closure to test whether a referent is alive.
5560   G1STWIsAliveClosure is_alive(this);
5561 
5562   // Even when parallel reference processing is enabled, the processing
5563   // of JNI refs is serial and is performed by the current thread
5564   // rather than by a worker. The following PSS will be used for processing
5565   // JNI refs.
5566 
5567   // Use only a single queue for this PSS.
5568   G1ParScanThreadState            pss(this, 0, NULL);
5569 
5570   // We do not embed a reference processor in the copying/scanning
5571   // closures while we're actually processing the discovered
5572   // reference objects.
5573   G1ParScanHeapEvacFailureClosure evac_failure_cl(this, &pss, NULL);
5574 
5575   pss.set_evac_failure_closure(&evac_failure_cl);


5662 
5663   double ref_enq_time = os::elapsedTime() - ref_enq_start;
5664   g1_policy()->phase_times()->record_ref_enq_time(ref_enq_time * 1000.0);
5665 }
5666 
5667 void G1CollectedHeap::evacuate_collection_set(EvacuationInfo& evacuation_info) {
5668   _expand_heap_after_alloc_failure = true;
5669   _evacuation_failed = false;
5670 
5671   // Should G1EvacuationFailureALot be in effect for this GC?
5672   NOT_PRODUCT(set_evacuation_failure_alot_for_current_gc();)
5673 
5674   g1_rem_set()->prepare_for_oops_into_collection_set_do();
5675 
5676   // Disable the hot card cache.
5677   G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache();
5678   hot_card_cache->reset_hot_cache_claimed_index();
5679   hot_card_cache->set_use_cache(false);
5680 
5681   uint n_workers;

5682   n_workers =
5683     AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
5684                                    workers()->active_workers(),
5685                                    Threads::number_of_non_daemon_threads());
5686   assert(UseDynamicNumberOfGCThreads ||
5687          n_workers == workers()->total_workers(),
5688          "If not dynamic should be using all the workers");
5689   workers()->set_active_workers(n_workers);
5690   set_par_threads(n_workers);





5691 
5692   G1ParTask g1_par_task(this, _task_queues);
5693 
5694   init_for_evac_failure(NULL);
5695 
5696   rem_set()->prepare_for_younger_refs_iterate(true);
5697 
5698   assert(dirty_card_queue_set().completed_buffers_num() == 0, "Should be empty");
5699   double start_par_time_sec = os::elapsedTime();
5700   double end_par_time_sec;
5701 
5702   {
5703     StrongRootsScope srs(this);
5704     // InitialMark needs claim bits to keep track of the marked-through CLDs.
5705     if (g1_policy()->during_initial_mark_pause()) {
5706       ClassLoaderDataGraph::clear_claimed_marks();
5707     }
5708 

5709     // The individual threads will set their evac-failure closures.
5710     if (ParallelGCVerbose) G1ParScanThreadState::print_termination_stats_hdr();
5711     // These tasks use SharedHeap::_process_strong_tasks
5712     assert(UseDynamicNumberOfGCThreads ||
5713            workers()->active_workers() == workers()->total_workers(),
5714            "If not dynamic should be using all the workers");
5715     workers()->run_task(&g1_par_task);




5716     end_par_time_sec = os::elapsedTime();
5717 
5718     // Closing the inner scope will execute the destructor
5719     // for the StrongRootsScope object. We record the current
5720     // elapsed time before closing the scope so that time
5721     // taken for the SRS destructor is NOT included in the
5722     // reported parallel time.
5723   }
5724 
5725   double par_time_ms = (end_par_time_sec - start_par_time_sec) * 1000.0;
5726   g1_policy()->phase_times()->record_par_time(par_time_ms);
5727 
5728   double code_root_fixup_time_ms =
5729         (os::elapsedTime() - end_par_time_sec) * 1000.0;
5730   g1_policy()->phase_times()->record_code_root_fixup_time(code_root_fixup_time_ms);
5731 
5732   set_par_threads(0);
5733 
5734   // Process any discovered reference objects - we have
5735   // to do this _before_ we retire the GC alloc regions


6001   }
6002 };
6003 
6004 void G1CollectedHeap::check_bitmaps(const char* caller) {
6005   if (!G1VerifyBitmaps) return;
6006 
6007   G1VerifyBitmapClosure cl(caller, this);
6008   heap_region_iterate(&cl);
6009   guarantee(!cl.failures(), "bitmap verification");
6010 }
6011 #endif // PRODUCT
6012 
6013 void G1CollectedHeap::cleanUpCardTable() {
6014   G1SATBCardTableModRefBS* ct_bs = g1_barrier_set();
6015   double start = os::elapsedTime();
6016 
6017   {
6018     // Iterate over the dirty cards region list.
6019     G1ParCleanupCTTask cleanup_task(ct_bs, this);
6020 

6021     set_par_threads();
6022     workers()->run_task(&cleanup_task);
6023     set_par_threads(0);












6024 #ifndef PRODUCT
6025     if (G1VerifyCTCleanup || VerifyAfterGC) {
6026       G1VerifyCardTableCleanup cleanup_verifier(this, ct_bs);
6027       heap_region_iterate(&cleanup_verifier);
6028     }
6029 #endif
6030   }
6031 
6032   double elapsed = os::elapsedTime() - start;
6033   g1_policy()->phase_times()->record_clear_ct_time(elapsed * 1000.0);
6034 }
6035 
6036 void G1CollectedHeap::free_collection_set(HeapRegion* cs_head, EvacuationInfo& evacuation_info) {
6037   size_t pre_used = 0;
6038   FreeRegionList local_free_list("Local List for CSet Freeing");
6039 
6040   double young_time_ms     = 0.0;
6041   double non_young_time_ms = 0.0;
6042 
6043   // Since the collection set is a superset of the young list,


6543   return NULL;
6544 }
6545 
6546 void G1CollectedHeap::retire_mutator_alloc_region(HeapRegion* alloc_region,
6547                                                   size_t allocated_bytes) {
6548   assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
6549   assert(alloc_region->is_eden(), "all mutator alloc regions should be eden");
6550 
6551   g1_policy()->add_region_to_incremental_cset_lhs(alloc_region);
6552   _allocator->increase_used(allocated_bytes);
6553   _hr_printer.retire(alloc_region);
6554   // We update the eden sizes here, when the region is retired,
6555   // instead of when it's allocated, since this is the point that its
6556   // used space has been recorded in _summary_bytes_used.
6557   g1mm()->update_eden_size();
6558 }
6559 
6560 void G1CollectedHeap::set_par_threads() {
6561   // Don't change the number of workers.  Use the value previously set
6562   // in the workgroup.

6563   uint n_workers = workers()->active_workers();
6564   assert(UseDynamicNumberOfGCThreads ||
6565            n_workers == workers()->total_workers(),
6566       "Otherwise should be using the total number of workers");
6567   if (n_workers == 0) {
6568     assert(false, "Should have been set in prior evacuation pause.");
6569     n_workers = ParallelGCThreads;
6570     workers()->set_active_workers(n_workers);
6571   }
6572   set_par_threads(n_workers);
6573 }
6574 
6575 // Methods for the GC alloc regions
6576 
6577 HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size,
6578                                                  uint count,
6579                                                  GCAllocPurpose ap) {
6580   assert(FreeList_lock->owned_by_self(), "pre-condition");
6581 
6582   if (count < g1_policy()->max_regions(ap)) {

