1419
1420 // Resize the heap if necessary.
1421 resize_if_necessary_after_full_collection(explicit_gc ? 0 : word_size);
1422
1423 if (_hr_printer.is_active()) {
1424 // We should do this after we potentially resize the heap so
1425 // that all the COMMIT / UNCOMMIT events are generated before
1426 // the end GC event.
1427
1428 print_hrm_post_compaction();
1429 _hr_printer.end_gc(true /* full */, (size_t) total_collections());
1430 }
1431
1432 G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache();
1433 if (hot_card_cache->use_cache()) {
1434 hot_card_cache->reset_card_counts();
1435 hot_card_cache->reset_hot_cache();
1436 }
1437
1438 // Rebuild remembered sets of all regions.
1439 if (G1CollectedHeap::use_parallel_gc_threads()) {
1440 uint n_workers =
1441 AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
1442 workers()->active_workers(),
1443 Threads::number_of_non_daemon_threads());
1444 assert(UseDynamicNumberOfGCThreads ||
1445 n_workers == workers()->total_workers(),
1446 "If not dynamic should be using all the workers");
1447 workers()->set_active_workers(n_workers);
1448 // Set parallel threads in the heap (_n_par_threads) only
1449 // before a parallel phase and always reset it to 0 after
1450 // the phase so that the number of parallel threads does
1451 // not get carried forward to a serial phase where there
1452 // may be code that is "possibly_parallel".
1453 set_par_threads(n_workers);
1454
1455 ParRebuildRSTask rebuild_rs_task(this);
1456 assert(UseDynamicNumberOfGCThreads ||
1457 workers()->active_workers() == workers()->total_workers(),
1458 "Unless dynamic should use total workers");
1459 // Use the most recent number of active workers
1460 assert(workers()->active_workers() > 0,
1461 "Active workers not properly set");
1462 set_par_threads(workers()->active_workers());
1463 workers()->run_task(&rebuild_rs_task);
1464 set_par_threads(0);
1465 } else {
1466 RebuildRSOutOfRegionClosure rebuild_rs(this);
1467 heap_region_iterate(&rebuild_rs);
1468 }
1469
1470 // Rebuild the strong code root lists for each region
1471 rebuild_strong_code_roots();
1472
1473 if (true) { // FIXME
1474 MetaspaceGC::compute_new_size();
1475 }
1476
1477 #ifdef TRACESPINNING
1478 ParallelTaskTerminator::print_termination_counts();
1479 #endif
1480
1481 // Discard all RSet updates
1482 JavaThread::dirty_card_queue_set().abandon_logs();
1483 assert(dirty_card_queue_set().completed_buffers_num() == 0, "DCQS should be empty");
1484
1485 _young_list->reset_sampled_info();
1486 // At this point there should be no regions in the
1487 // entire heap tagged as young.
1488 assert(check_young_list_empty(true /* check_heap */),
2659 // during the current pause - so it's valid.
2660 // Note: the cached starting heap region may be NULL
2661 // (when the collection set is empty).
2662 result = _worker_cset_start_region[worker_i];
2663 assert(result == NULL || result->in_collection_set(), "sanity");
2664 return result;
2665 }
2666
2667 // The cached entry was not valid so let's calculate
2668 // a suitable starting heap region for this worker.
2669
2670 // We want the parallel threads to start their collection
2671 // set iteration at different collection set regions to
2672 // avoid contention.
2673 // If we have:
2674 // n collection set regions
2675 // p threads
2676 // Then thread t will start at region floor ((t * n) / p)
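     // Worked example (illustrative): with n = 10 regions and p = 4
     // threads, workers 0..3 start at floor(0/4) = 0, floor(10/4) = 2,
     // floor(20/4) = 5 and floor(30/4) = 7. Worker t can also reuse
     // worker t-1's cached start region (see below) and only walk
     // forward from floor(((t - 1) * n) / p) to floor((t * n) / p).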
2677
2678 result = g1_policy()->collection_set();
2679 if (G1CollectedHeap::use_parallel_gc_threads()) {
2680 uint cs_size = g1_policy()->cset_region_length();
2681 uint active_workers = workers()->active_workers();
2682 assert(UseDynamicNumberOfGCThreads ||
2683 active_workers == workers()->total_workers(),
2684 "Unless dynamic should use total workers");
2685
2686 uint end_ind = (cs_size * worker_i) / active_workers;
2687 uint start_ind = 0;
2688
2689 if (worker_i > 0 &&
2690 _worker_cset_start_region_time_stamp[worker_i - 1] == gc_time_stamp) {
2691 // Previous worker's starting region is valid
2692 // so let's iterate from there
2693 start_ind = (cs_size * (worker_i - 1)) / active_workers;
2694 result = _worker_cset_start_region[worker_i - 1];
2695 }
2696
2697 for (uint i = start_ind; i < end_ind; i++) {
2698 result = result->next_in_collection_set();
2699 }
2700 }
2701
2702 // Note: the calculated starting heap region may be NULL
2703 // (when the collection set is empty).
2704 assert(result == NULL || result->in_collection_set(), "sanity");
2705 assert(_worker_cset_start_region_time_stamp[worker_i] != gc_time_stamp,
2706 "should be updated only once per pause");
2707 _worker_cset_start_region[worker_i] = result;
2708 OrderAccess::storestore();
2709 _worker_cset_start_region_time_stamp[worker_i] = gc_time_stamp;
2710 return result;
2711 }
2712
2713 void G1CollectedHeap::collection_set_iterate(HeapRegionClosure* cl) {
2714 HeapRegion* r = g1_policy()->collection_set();
2715 while (r != NULL) {
2716 HeapRegion* next = r->next_in_collection_set();
2717 if (cl->doHeapRegion(r)) {
2718 cl->incomplete();
2719 return;
2720 }
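     // A minimal HeapRegionClosure sketch (illustrative; the class name
     // and counter are hypothetical) of the contract relied on above:
     // doHeapRegion() returns true to abort the walk, at which point
     // the iterator marks the closure incomplete().
     //
     //   class CountCSetRegionsClosure : public HeapRegionClosure {
     //     uint _count;
     //    public:
     //     CountCSetRegionsClosure() : _count(0) { }
     //     bool doHeapRegion(HeapRegion* r) { _count++; return false; }
     //     uint count() const { return _count; }
     //   };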
3354 st->cr();
3355 st->print_cr("Heap Regions: (Y=young(eden), SU=young(survivor), "
3356 "HS=humongous(starts), HC=humongous(continues), "
3357 "CS=collection set, F=free, TS=gc time stamp, "
3358 "PTAMS=previous top-at-mark-start, "
3359 "NTAMS=next top-at-mark-start)");
3360 PrintRegionClosure blk(st);
3361 heap_region_iterate(&blk);
3362 }
3363
3364 void G1CollectedHeap::print_on_error(outputStream* st) const {
3365 this->CollectedHeap::print_on_error(st);
3366
3367 if (_cm != NULL) {
3368 st->cr();
3369 _cm->print_on_error(st);
3370 }
3371 }
3372
3373 void G1CollectedHeap::print_gc_threads_on(outputStream* st) const {
3374 if (G1CollectedHeap::use_parallel_gc_threads()) {
3375 workers()->print_worker_threads_on(st);
3376 }
3377 _cmThread->print_on(st);
3378 st->cr();
3379 _cm->print_worker_threads_on(st);
3380 _cg1r->print_worker_threads_on(st);
3381 if (G1StringDedup::is_enabled()) {
3382 G1StringDedup::print_worker_threads_on(st);
3383 }
3384 }
3385
3386 void G1CollectedHeap::gc_threads_do(ThreadClosure* tc) const {
3387 if (G1CollectedHeap::use_parallel_gc_threads()) {
3388 workers()->threads_do(tc);
3389 }
3390 tc->do_thread(_cmThread);
3391 _cg1r->threads_do(tc);
3392 if (G1StringDedup::is_enabled()) {
3393 G1StringDedup::threads_do(tc);
3394 }
3395 }
3396
3397 void G1CollectedHeap::print_tracing_info() const {
3398 // We'll overload this to mean "trace GC pause statistics."
3399 if (TraceYoungGenTime || TraceOldGenTime) {
3400 // The "G1CollectorPolicy" is keeping track of these stats, so delegate
3401 // to that.
3402 g1_policy()->print_tracing_info();
3403 }
3404 if (G1SummarizeRSetStats) {
3405 g1_rem_set()->print_summary_info();
3406 }
3407 if (G1SummarizeConcMark) {
3408 concurrent_mark()->print_summary_info();
3409 }
3664 guarantee(hr->rem_set()->verify_ready_for_par_iteration(), "verification");
3665
3666 // Here's a good place to add any other checks we'd like to
3667 // perform on CSet regions.
3668 return false;
3669 }
3670 };
3671 #endif // ASSERT
3672
3673 #if TASKQUEUE_STATS
3674 void G1CollectedHeap::print_taskqueue_stats_hdr(outputStream* const st) {
3675 st->print_raw_cr("GC Task Stats");
3676 st->print_raw("thr "); TaskQueueStats::print_header(1, st); st->cr();
3677 st->print_raw("--- "); TaskQueueStats::print_header(2, st); st->cr();
3678 }
3679
3680 void G1CollectedHeap::print_taskqueue_stats(outputStream* const st) const {
3681 print_taskqueue_stats_hdr(st);
3682
3683 TaskQueueStats totals;
3684 const int n = workers() != NULL ? workers()->total_workers() : 1;
3685 for (int i = 0; i < n; ++i) {
3686 st->print("%3d ", i); task_queue(i)->stats.print(st); st->cr();
3687 totals += task_queue(i)->stats;
3688 }
3689 st->print_raw("tot "); totals.print(st); st->cr();
3690
3691 DEBUG_ONLY(totals.verify());
3692 }
3693
3694 void G1CollectedHeap::reset_taskqueue_stats() {
3695 const int n = workers() != NULL ? workers()->total_workers() : 1;
3696 for (int i = 0; i < n; ++i) {
3697 task_queue(i)->stats.reset();
3698 }
3699 }
3700 #endif // TASKQUEUE_STATS
3701
3702 void G1CollectedHeap::log_gc_header() {
3703 if (!G1Log::fine()) {
3704 return;
3705 }
3706
3707 gclog_or_tty->gclog_stamp(_gc_tracer_stw->gc_id());
3708
3709 GCCauseString gc_cause_str = GCCauseString("GC pause", gc_cause())
3710 .append(g1_policy()->gcs_are_young() ? "(young)" : "(mixed)")
3711 .append(g1_policy()->during_initial_mark_pause() ? " (initial-mark)" : "");
3712
3713 gclog_or_tty->print("[%s", (const char*)gc_cause_str);
3714 }
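// Illustrative output of the header above (the exact cause text
// depends on the GCCause; the matching log footer later appends
// the pause timings):
//
//   [GC pause (G1 Evacuation Pause) (young) (initial-mark)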
3715
3773 // Record whether this pause is an initial mark. When the current
3774 // thread has completed its logging output and it's safe to signal
3775 // the CM thread, the flag's value in the policy has been reset.
3776 bool should_start_conc_mark = g1_policy()->during_initial_mark_pause();
3777
3778 // Inner scope for scope-based logging, timers, and stats collection
3779 {
3780 EvacuationInfo evacuation_info;
3781
3782 if (g1_policy()->during_initial_mark_pause()) {
3783 // We are about to start a marking cycle, so we increment the
3784 // counter of old marking cycles started.
3785 increment_old_marking_cycles_started();
3786 register_concurrent_cycle_start(_gc_timer_stw->gc_start());
3787 }
3788
3789 _gc_tracer_stw->report_yc_type(yc_type());
3790
3791 TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
3792
3793 int active_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
3794 workers()->active_workers() : 1);
3795 double pause_start_sec = os::elapsedTime();
3796 g1_policy()->phase_times()->note_gc_start(active_workers);
3797 log_gc_header();
3798
3799 TraceCollectorStats tcs(g1mm()->incremental_collection_counters());
3800 TraceMemoryManagerStats tms(false /* fullGC */, gc_cause());
3801
3802 // If the secondary_free_list is not empty, append it to the
3803 // free_list. No need to wait for the cleanup operation to finish;
3804 // the region allocation code will check the secondary_free_list
3805 // and wait if necessary. If the G1StressConcRegionFreeing flag is
3806 // set, skip this step so that the region allocation code has to
3807 // get entries from the secondary_free_list.
3808 if (!G1StressConcRegionFreeing) {
3809 append_secondary_free_list_if_not_empty_with_lock();
3810 }
3811
3812 assert(check_young_list_well_formed(), "young list should be well formed");
3813
3814 // Don't dynamically change the number of GC threads this early. A value of
4768
4769 g1_rem_set()->oops_into_collection_set_do(scan_rs, &scavenge_cs_nmethods, worker_i);
4770
4771 _process_strong_tasks->all_tasks_completed();
4772 }
4773
4774 class G1StringSymbolTableUnlinkTask : public AbstractGangTask {
4775 private:
4776 BoolObjectClosure* _is_alive;
4777 int _initial_string_table_size;
4778 int _initial_symbol_table_size;
4779
4780 bool _process_strings;
4781 int _strings_processed;
4782 int _strings_removed;
4783
4784 bool _process_symbols;
4785 int _symbols_processed;
4786 int _symbols_removed;
4787
4788 bool _do_in_parallel;
4789 public:
4790 G1StringSymbolTableUnlinkTask(BoolObjectClosure* is_alive, bool process_strings, bool process_symbols) :
4791 AbstractGangTask("String/Symbol Unlinking"),
4792 _is_alive(is_alive),
4793 _do_in_parallel(G1CollectedHeap::use_parallel_gc_threads()),
4794 _process_strings(process_strings), _strings_processed(0), _strings_removed(0),
4795 _process_symbols(process_symbols), _symbols_processed(0), _symbols_removed(0) {
4796
4797 _initial_string_table_size = StringTable::the_table()->table_size();
4798 _initial_symbol_table_size = SymbolTable::the_table()->table_size();
4799 if (process_strings) {
4800 StringTable::clear_parallel_claimed_index();
4801 }
4802 if (process_symbols) {
4803 SymbolTable::clear_parallel_claimed_index();
4804 }
4805 }
4806
4807 ~G1StringSymbolTableUnlinkTask() {
4808 guarantee(!_process_strings || !_do_in_parallel || StringTable::parallel_claimed_index() >= _initial_string_table_size,
4809 err_msg("claim value %d after unlink less than initial string table size %d",
4810 StringTable::parallel_claimed_index(), _initial_string_table_size));
4811 guarantee(!_process_symbols || !_do_in_parallel || SymbolTable::parallel_claimed_index() >= _initial_symbol_table_size,
4812 err_msg("claim value %d after unlink less than initial symbol table size %d",
4813 SymbolTable::parallel_claimed_index(), _initial_symbol_table_size));
4814
4815 if (G1TraceStringSymbolTableScrubbing) {
4816 gclog_or_tty->print_cr("Cleaned string and symbol table, "
4817 "strings: "SIZE_FORMAT" processed, "SIZE_FORMAT" removed, "
4818 "symbols: "SIZE_FORMAT" processed, "SIZE_FORMAT" removed",
4819 strings_processed(), strings_removed(),
4820 symbols_processed(), symbols_removed());
4821 }
4822 }
4823
4824 void work(uint worker_id) {
4825 if (_do_in_parallel) {
4826 int strings_processed = 0;
4827 int strings_removed = 0;
4828 int symbols_processed = 0;
4829 int symbols_removed = 0;
4830 if (_process_strings) {
4831 StringTable::possibly_parallel_unlink(_is_alive, &strings_processed, &strings_removed);
4832 Atomic::add(strings_processed, &_strings_processed);
4833 Atomic::add(strings_removed, &_strings_removed);
4834 }
4835 if (_process_symbols) {
4836 SymbolTable::possibly_parallel_unlink(&symbols_processed, &symbols_removed);
4837 Atomic::add(symbols_processed, &_symbols_processed);
4838 Atomic::add(symbols_removed, &_symbols_removed);
4839 }
4840 } else {
4841 if (_process_strings) {
4842 StringTable::unlink(_is_alive, &_strings_processed, &_strings_removed);
4843 }
4844 if (_process_symbols) {
4845 SymbolTable::unlink(&_symbols_processed, &_symbols_removed);
4846 }
4847 }
4848 }
4849
4850 size_t strings_processed() const { return (size_t)_strings_processed; }
4851 size_t strings_removed() const { return (size_t)_strings_removed; }
4852
4853 size_t symbols_processed() const { return (size_t)_symbols_processed; }
4854 size_t symbols_removed() const { return (size_t)_symbols_removed; }
4855 };
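// Typical (illustrative) use of the task above: constructing it resets
// the tables' parallel claim indices, the work gang runs it, and the
// destructor verifies that every bucket was claimed:
//
//   G1StringSymbolTableUnlinkTask task(is_alive, true, true);
//   set_par_threads(workers()->active_workers());
//   workers()->run_task(&task);
//   set_par_threads(0);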
4856
4857 class G1CodeCacheUnloadingTask VALUE_OBJ_CLASS_SPEC {
4858 private:
4859 static Monitor* _lock;
4860
4861 BoolObjectClosure* const _is_alive;
4862 const bool _unloading_occurred;
4863 const uint _num_workers;
4864
4865 // Variables used to claim nmethods.
4866 nmethod* _first_nmethod;
4867 volatile nmethod* _claimed_nmethod;
5108 // Clean the Strings and Symbols.
5109 _string_symbol_task.work(worker_id);
5110
5111 // Wait for all workers to finish the first code cache cleaning pass.
5112 _code_cache_task.barrier_wait(worker_id);
5113
5114 // Do the second code cache cleaning pass, which relies on
5115 // the liveness information gathered during the first pass.
5116 _code_cache_task.work_second_pass(worker_id);
5117
5118 // Clean all klasses that were not unloaded.
5119 _klass_cleaning_task.work();
5120 }
5121 };
5122
5123
5124 void G1CollectedHeap::parallel_cleaning(BoolObjectClosure* is_alive,
5125 bool process_strings,
5126 bool process_symbols,
5127 bool class_unloading_occurred) {
5128 uint n_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
5129 workers()->active_workers() : 1);
5130
5131 G1ParallelCleaningTask g1_unlink_task(is_alive, process_strings, process_symbols,
5132 n_workers, class_unloading_occurred);
5133 if (G1CollectedHeap::use_parallel_gc_threads()) {
5134 set_par_threads(n_workers);
5135 workers()->run_task(&g1_unlink_task);
5136 set_par_threads(0);
5137 } else {
5138 g1_unlink_task.work(0);
5139 }
5140 }
5141
5142 void G1CollectedHeap::unlink_string_and_symbol_table(BoolObjectClosure* is_alive,
5143 bool process_strings, bool process_symbols) {
5144 {
5145 uint n_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
5146 _g1h->workers()->active_workers() : 1);
5147 G1StringSymbolTableUnlinkTask g1_unlink_task(is_alive, process_strings, process_symbols);
5148 if (G1CollectedHeap::use_parallel_gc_threads()) {
5149 set_par_threads(n_workers);
5150 workers()->run_task(&g1_unlink_task);
5151 set_par_threads(0);
5152 } else {
5153 g1_unlink_task.work(0);
5154 }
5155 }
5156
5157 if (G1StringDedup::is_enabled()) {
5158 G1StringDedup::unlink(is_alive);
5159 }
5160 }
5161
5162 class G1RedirtyLoggedCardsTask : public AbstractGangTask {
5163 private:
5164 DirtyCardQueueSet* _queue;
5165 public:
5166 G1RedirtyLoggedCardsTask(DirtyCardQueueSet* queue) : AbstractGangTask("Redirty Cards"), _queue(queue) { }
5167
5168 virtual void work(uint worker_id) {
5169 double start_time = os::elapsedTime();
5170
5171 RedirtyLoggedCardTableEntryClosure cl;
5172 if (G1CollectedHeap::heap()->use_parallel_gc_threads()) {
5173 _queue->par_apply_closure_to_all_completed_buffers(&cl);
5174 } else {
5175 _queue->apply_closure_to_all_completed_buffers(&cl);
5176 }
5177
5178 G1GCPhaseTimes* timer = G1CollectedHeap::heap()->g1_policy()->phase_times();
5179 timer->record_redirty_logged_cards_time_ms(worker_id, (os::elapsedTime() - start_time) * 1000.0);
5180 timer->record_redirty_logged_cards_processed_cards(worker_id, cl.num_processed());
5181 }
5182 };
5183
5184 void G1CollectedHeap::redirty_logged_cards() {
5185 double redirty_logged_cards_start = os::elapsedTime();
5186
5187 uint n_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
5188 _g1h->workers()->active_workers() : 1);
5189
5190 G1RedirtyLoggedCardsTask redirty_task(&dirty_card_queue_set());
5191 dirty_card_queue_set().reset_for_par_iteration();
5192 if (use_parallel_gc_threads()) {
5193 set_par_threads(n_workers);
5194 workers()->run_task(&redirty_task);
5195 set_par_threads(0);
5196 } else {
5197 redirty_task.work(0);
5198 }
5199
5200 DirtyCardQueueSet& dcq = JavaThread::dirty_card_queue_set();
5201 dcq.merge_bufferlists(&dirty_card_queue_set());
5202 assert(dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed");
5203
5204 g1_policy()->phase_times()->record_redirty_logged_cards_time_ms((os::elapsedTime() - redirty_logged_cards_start) * 1000.0);
5205 }
5206
5207 // Weak Reference Processing support
5208
5209 // An always "is_alive" closure that is used to preserve referents.
5210 // If the object is non-null then it's alive. Used in the preservation
5211 // of referent objects that are pointed to by reference objects
5212 // discovered by the CM ref processor.
5213 class G1AlwaysAliveClosure: public BoolObjectClosure {
5214 G1CollectedHeap* _g1;
5215 public:
5216 G1AlwaysAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
5217 bool do_object_b(oop p) {
5218 if (p != NULL) {
5568 // by the CM ref processor should have already been copied (either by
5569 // applying the external root copy closure to the discovered lists, or
5570 // by following an RSet entry).
5571 //
5572 // But some of the referents that these reference objects point to,
5573 // and that are in the collection set, may not have been copied: the STW ref
5574 // processor would have seen that the reference object had already
5575 // been 'discovered' and would have skipped discovering the reference,
5576 // but would not have treated the reference object as a regular oop.
5577 // As a result the copy closure would not have been applied to the
5578 // referent object.
5579 //
5580 // We need to explicitly copy these referent objects - the references
5581 // will be processed at the end of remarking.
5582 //
5583 // We also need to do this copying before we process the reference
5584 // objects discovered by the STW ref processor in case one of these
5585 // referents points to another object which is also referenced by an
5586 // object discovered by the STW ref processor.
5587
5588 assert(!G1CollectedHeap::use_parallel_gc_threads() ||
5589 no_of_gc_workers == workers()->active_workers(),
5590 "Need to reset active GC workers");
5591
5592 set_par_threads(no_of_gc_workers);
5593 G1ParPreserveCMReferentsTask keep_cm_referents(this,
5594 no_of_gc_workers,
5595 _task_queues);
5596
5597 if (G1CollectedHeap::use_parallel_gc_threads()) {
5598 workers()->run_task(&keep_cm_referents);
5599 } else {
5600 keep_cm_referents.work(0);
5601 }
5602
5603 set_par_threads(0);
5604
5605 // Closure to test whether a referent is alive.
5606 G1STWIsAliveClosure is_alive(this);
5607
5608 // Even when parallel reference processing is enabled, the processing
5609 // of JNI refs is serial and is performed by the current thread
5610 // rather than by a worker. The following PSS will be used for processing
5611 // JNI refs.
5612
5613 // Use only a single queue for this PSS.
5614 G1ParScanThreadState pss(this, 0, NULL);
5615
5616 // We do not embed a reference processor in the copying/scanning
5617 // closures while we're actually processing the discovered
5618 // reference objects.
5619 G1ParScanHeapEvacFailureClosure evac_failure_cl(this, &pss, NULL);
5620
5621 pss.set_evac_failure_closure(&evac_failure_cl);
5708
5709 double ref_enq_time = os::elapsedTime() - ref_enq_start;
5710 g1_policy()->phase_times()->record_ref_enq_time(ref_enq_time * 1000.0);
5711 }
5712
5713 void G1CollectedHeap::evacuate_collection_set(EvacuationInfo& evacuation_info) {
5714 _expand_heap_after_alloc_failure = true;
5715 _evacuation_failed = false;
5716
5717 // Should G1EvacuationFailureALot be in effect for this GC?
5718 NOT_PRODUCT(set_evacuation_failure_alot_for_current_gc();)
5719
5720 g1_rem_set()->prepare_for_oops_into_collection_set_do();
5721
5722 // Disable the hot card cache.
5723 G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache();
5724 hot_card_cache->reset_hot_cache_claimed_index();
5725 hot_card_cache->set_use_cache(false);
5726
5727 uint n_workers;
5728 if (G1CollectedHeap::use_parallel_gc_threads()) {
5729 n_workers =
5730 AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
5731 workers()->active_workers(),
5732 Threads::number_of_non_daemon_threads());
5733 assert(UseDynamicNumberOfGCThreads ||
5734 n_workers == workers()->total_workers(),
5735 "If not dynamic should be using all the workers");
5736 workers()->set_active_workers(n_workers);
5737 set_par_threads(n_workers);
5738 } else {
5739 assert(n_par_threads() == 0,
5740 "Should be the original non-parallel value");
5741 n_workers = 1;
5742 }
5743
5744 G1ParTask g1_par_task(this, _task_queues);
5745
5746 init_for_evac_failure(NULL);
5747
5748 rem_set()->prepare_for_younger_refs_iterate(true);
5749
5750 assert(dirty_card_queue_set().completed_buffers_num() == 0, "Should be empty");
5751 double start_par_time_sec = os::elapsedTime();
5752 double end_par_time_sec;
5753
5754 {
5755 StrongRootsScope srs(this);
5756 // InitialMark needs claim bits to keep track of the marked-through CLDs.
5757 if (g1_policy()->during_initial_mark_pause()) {
5758 ClassLoaderDataGraph::clear_claimed_marks();
5759 }
5760
5761 if (G1CollectedHeap::use_parallel_gc_threads()) {
5762 // The individual threads will set their evac-failure closures.
5763 if (ParallelGCVerbose) G1ParScanThreadState::print_termination_stats_hdr();
5764 // These tasks use SharedHeap::_process_strong_tasks
5765 assert(UseDynamicNumberOfGCThreads ||
5766 workers()->active_workers() == workers()->total_workers(),
5767 "If not dynamic should be using all the workers");
5768 workers()->run_task(&g1_par_task);
5769 } else {
5770 g1_par_task.set_for_termination(n_workers);
5771 g1_par_task.work(0);
5772 }
5773 end_par_time_sec = os::elapsedTime();
5774
5775 // Closing the inner scope will execute the destructor
5776 // for the StrongRootsScope object. We record the current
5777 // elapsed time before closing the scope so that time
5778 // taken for the SRS destructor is NOT included in the
5779 // reported parallel time.
5780 }
5781
5782 double par_time_ms = (end_par_time_sec - start_par_time_sec) * 1000.0;
5783 g1_policy()->phase_times()->record_par_time(par_time_ms);
5784
5785 double code_root_fixup_time_ms =
5786 (os::elapsedTime() - end_par_time_sec) * 1000.0;
5787 g1_policy()->phase_times()->record_code_root_fixup_time(code_root_fixup_time_ms);
5788
5789 set_par_threads(0);
5790
5791 // Process any discovered reference objects - we have
5792 // to do this _before_ we retire the GC alloc regions
6058 }
6059 };
6060
6061 void G1CollectedHeap::check_bitmaps(const char* caller) {
6062 if (!G1VerifyBitmaps) return;
6063
6064 G1VerifyBitmapClosure cl(caller, this);
6065 heap_region_iterate(&cl);
6066 guarantee(!cl.failures(), "bitmap verification");
6067 }
6068 #endif // PRODUCT
6069
6070 void G1CollectedHeap::cleanUpCardTable() {
6071 G1SATBCardTableModRefBS* ct_bs = g1_barrier_set();
6072 double start = os::elapsedTime();
6073
6074 {
6075 // Iterate over the dirty cards region list.
6076 G1ParCleanupCTTask cleanup_task(ct_bs, this);
6077
6078 if (G1CollectedHeap::use_parallel_gc_threads()) {
6079 set_par_threads();
6080 workers()->run_task(&cleanup_task);
6081 set_par_threads(0);
6082 } else {
6083 while (_dirty_cards_region_list) {
6084 HeapRegion* r = _dirty_cards_region_list;
6085 cleanup_task.clear_cards(r);
6086 _dirty_cards_region_list = r->get_next_dirty_cards_region();
6087 if (_dirty_cards_region_list == r) {
6088 // The last region points to itself, so we're done.
6089 _dirty_cards_region_list = NULL;
6090 }
6091 r->set_next_dirty_cards_region(NULL);
6092 }
6093 }
6094 #ifndef PRODUCT
6095 if (G1VerifyCTCleanup || VerifyAfterGC) {
6096 G1VerifyCardTableCleanup cleanup_verifier(this, ct_bs);
6097 heap_region_iterate(&cleanup_verifier);
6098 }
6099 #endif
6100 }
6101
6102 double elapsed = os::elapsedTime() - start;
6103 g1_policy()->phase_times()->record_clear_ct_time(elapsed * 1000.0);
6104 }
6105
6106 void G1CollectedHeap::free_collection_set(HeapRegion* cs_head, EvacuationInfo& evacuation_info) {
6107 size_t pre_used = 0;
6108 FreeRegionList local_free_list("Local List for CSet Freeing");
6109
6110 double young_time_ms = 0.0;
6111 double non_young_time_ms = 0.0;
6112
6113 // Since the collection set is a superset of the young list,
6610 return NULL;
6611 }
6612
6613 void G1CollectedHeap::retire_mutator_alloc_region(HeapRegion* alloc_region,
6614 size_t allocated_bytes) {
6615 assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
6616 assert(alloc_region->is_eden(), "all mutator alloc regions should be eden");
6617
6618 g1_policy()->add_region_to_incremental_cset_lhs(alloc_region);
6619 _allocator->increase_used(allocated_bytes);
6620 _hr_printer.retire(alloc_region);
6621 // We update the eden sizes here, when the region is retired,
6622 // instead of when it's allocated, since this is the point at which its
6623 // used space has been recorded in _summary_bytes_used.
6624 g1mm()->update_eden_size();
6625 }
6626
6627 void G1CollectedHeap::set_par_threads() {
6628 // Don't change the number of workers. Use the value previously set
6629 // in the workgroup.
6630 assert(G1CollectedHeap::use_parallel_gc_threads(), "shouldn't be here otherwise");
6631 uint n_workers = workers()->active_workers();
6632 assert(UseDynamicNumberOfGCThreads ||
6633 n_workers == workers()->total_workers(),
6634 "Otherwise should be using the total number of workers");
6635 if (n_workers == 0) {
6636 assert(false, "Should have been set in prior evacuation pause.");
6637 n_workers = ParallelGCThreads;
6638 workers()->set_active_workers(n_workers);
6639 }
6640 set_par_threads(n_workers);
6641 }
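// Typical (illustrative) bracketing of a parallel phase with the helper
// above, mirroring cleanUpCardTable():
//
//   set_par_threads();             // adopt the workgroup's active count
//   workers()->run_task(&some_task);
//   set_par_threads(0);            // always reset before serial code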
6642
6643 // Methods for the GC alloc regions
6644
6645 HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size,
6646 uint count,
6647 GCAllocPurpose ap) {
6648 assert(FreeList_lock->owned_by_self(), "pre-condition");
6649
6650 if (count < g1_policy()->max_regions(ap)) {
|
1419
1420 // Resize the heap if necessary.
1421 resize_if_necessary_after_full_collection(explicit_gc ? 0 : word_size);
1422
1423 if (_hr_printer.is_active()) {
1424 // We should do this after we potentially resize the heap so
1425 // that all the COMMIT / UNCOMMIT events are generated before
1426 // the end GC event.
1427
1428 print_hrm_post_compaction();
1429 _hr_printer.end_gc(true /* full */, (size_t) total_collections());
1430 }
1431
1432 G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache();
1433 if (hot_card_cache->use_cache()) {
1434 hot_card_cache->reset_card_counts();
1435 hot_card_cache->reset_hot_cache();
1436 }
1437
1438 // Rebuild remembered sets of all regions.
1439 uint n_workers =
1440 AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
1441 workers()->active_workers(),
1442 Threads::number_of_non_daemon_threads());
1443 assert(UseDynamicNumberOfGCThreads ||
1444 n_workers == workers()->total_workers(),
1445 "If not dynamic should be using all the workers");
1446 workers()->set_active_workers(n_workers);
1447 // Set parallel threads in the heap (_n_par_threads) only
1448 // before a parallel phase and always reset it to 0 after
1449 // the phase so that the number of parallel threads does
1450 // not get carried forward to a serial phase where there
1451 // may be code that is "possibly_parallel".
1452 set_par_threads(n_workers);
1453
1454 ParRebuildRSTask rebuild_rs_task(this);
1455 assert(UseDynamicNumberOfGCThreads ||
1456 workers()->active_workers() == workers()->total_workers(),
1457 "Unless dynamic should use total workers");
1458 // Use the most recent number of active workers
1459 assert(workers()->active_workers() > 0,
1460 "Active workers not properly set");
1461 set_par_threads(workers()->active_workers());
1462 workers()->run_task(&rebuild_rs_task);
1463 set_par_threads(0);
1464
1465 // Rebuild the strong code root lists for each region
1466 rebuild_strong_code_roots();
1467
1468 if (true) { // FIXME
1469 MetaspaceGC::compute_new_size();
1470 }
1471
1472 #ifdef TRACESPINNING
1473 ParallelTaskTerminator::print_termination_counts();
1474 #endif
1475
1476 // Discard all RSet updates
1477 JavaThread::dirty_card_queue_set().abandon_logs();
1478 assert(dirty_card_queue_set().completed_buffers_num() == 0, "DCQS should be empty");
1479
1480 _young_list->reset_sampled_info();
1481 // At this point there should be no regions in the
1482 // entire heap tagged as young.
1483 assert(check_young_list_empty(true /* check_heap */),
2654 // during the current pause - so it's valid.
2655 // Note: the cached starting heap region may be NULL
2656 // (when the collection set is empty).
2657 result = _worker_cset_start_region[worker_i];
2658 assert(result == NULL || result->in_collection_set(), "sanity");
2659 return result;
2660 }
2661
2662 // The cached entry was not valid so let's calculate
2663 // a suitable starting heap region for this worker.
2664
2665 // We want the parallel threads to start their collection
2666 // set iteration at different collection set regions to
2667 // avoid contention.
2668 // If we have:
2669 // n collection set regions
2670 // p threads
2671 // Then thread t will start at region floor ((t * n) / p)
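     // Worked example (illustrative): with n = 10 regions and p = 4
     // threads, workers 0..3 start at floor(0/4) = 0, floor(10/4) = 2,
     // floor(20/4) = 5 and floor(30/4) = 7. Worker t can also reuse
     // worker t-1's cached start region (see below) and only walk
     // forward from floor(((t - 1) * n) / p) to floor((t * n) / p).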
2672
2673 result = g1_policy()->collection_set();
2674 uint cs_size = g1_policy()->cset_region_length();
2675 uint active_workers = workers()->active_workers();
2676 assert(UseDynamicNumberOfGCThreads ||
2677 active_workers == workers()->total_workers(),
2678 "Unless dynamic should use total workers");
2679
2680 uint end_ind = (cs_size * worker_i) / active_workers;
2681 uint start_ind = 0;
2682
2683 if (worker_i > 0 &&
2684 _worker_cset_start_region_time_stamp[worker_i - 1] == gc_time_stamp) {
2685 // Previous worker's starting region is valid
2686 // so let's iterate from there
2687 start_ind = (cs_size * (worker_i - 1)) / active_workers;
2688 result = _worker_cset_start_region[worker_i - 1];
2689 }
2690
2691 for (uint i = start_ind; i < end_ind; i++) {
2692 result = result->next_in_collection_set();
2693 }
2694
2695 // Note: the calculated starting heap region may be NULL
2696 // (when the collection set is empty).
2697 assert(result == NULL || result->in_collection_set(), "sanity");
2698 assert(_worker_cset_start_region_time_stamp[worker_i] != gc_time_stamp,
2699 "should be updated only once per pause");
2700 _worker_cset_start_region[worker_i] = result;
2701 OrderAccess::storestore();
2702 _worker_cset_start_region_time_stamp[worker_i] = gc_time_stamp;
2703 return result;
2704 }
2705
2706 void G1CollectedHeap::collection_set_iterate(HeapRegionClosure* cl) {
2707 HeapRegion* r = g1_policy()->collection_set();
2708 while (r != NULL) {
2709 HeapRegion* next = r->next_in_collection_set();
2710 if (cl->doHeapRegion(r)) {
2711 cl->incomplete();
2712 return;
2713 }
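     // A minimal HeapRegionClosure sketch (illustrative; the class name
     // and counter are hypothetical) of the contract relied on above:
     // doHeapRegion() returns true to abort the walk, at which point
     // the iterator marks the closure incomplete().
     //
     //   class CountCSetRegionsClosure : public HeapRegionClosure {
     //     uint _count;
     //    public:
     //     CountCSetRegionsClosure() : _count(0) { }
     //     bool doHeapRegion(HeapRegion* r) { _count++; return false; }
     //     uint count() const { return _count; }
     //   };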
3347 st->cr();
3348 st->print_cr("Heap Regions: (Y=young(eden), SU=young(survivor), "
3349 "HS=humongous(starts), HC=humongous(continues), "
3350 "CS=collection set, F=free, TS=gc time stamp, "
3351 "PTAMS=previous top-at-mark-start, "
3352 "NTAMS=next top-at-mark-start)");
3353 PrintRegionClosure blk(st);
3354 heap_region_iterate(&blk);
3355 }
3356
3357 void G1CollectedHeap::print_on_error(outputStream* st) const {
3358 this->CollectedHeap::print_on_error(st);
3359
3360 if (_cm != NULL) {
3361 st->cr();
3362 _cm->print_on_error(st);
3363 }
3364 }
3365
3366 void G1CollectedHeap::print_gc_threads_on(outputStream* st) const {
3367 workers()->print_worker_threads_on(st);
3368 _cmThread->print_on(st);
3369 st->cr();
3370 _cm->print_worker_threads_on(st);
3371 _cg1r->print_worker_threads_on(st);
3372 if (G1StringDedup::is_enabled()) {
3373 G1StringDedup::print_worker_threads_on(st);
3374 }
3375 }
3376
3377 void G1CollectedHeap::gc_threads_do(ThreadClosure* tc) const {
3378 workers()->threads_do(tc);
3379 tc->do_thread(_cmThread);
3380 _cg1r->threads_do(tc);
3381 if (G1StringDedup::is_enabled()) {
3382 G1StringDedup::threads_do(tc);
3383 }
3384 }
3385
3386 void G1CollectedHeap::print_tracing_info() const {
3387 // We'll overload this to mean "trace GC pause statistics."
3388 if (TraceYoungGenTime || TraceOldGenTime) {
3389 // The "G1CollectorPolicy" is keeping track of these stats, so delegate
3390 // to that.
3391 g1_policy()->print_tracing_info();
3392 }
3393 if (G1SummarizeRSetStats) {
3394 g1_rem_set()->print_summary_info();
3395 }
3396 if (G1SummarizeConcMark) {
3397 concurrent_mark()->print_summary_info();
3398 }
3653 guarantee(hr->rem_set()->verify_ready_for_par_iteration(), "verification");
3654
3655 // Here's a good place to add any other checks we'd like to
3656 // perform on CSet regions.
3657 return false;
3658 }
3659 };
3660 #endif // ASSERT
3661
3662 #if TASKQUEUE_STATS
3663 void G1CollectedHeap::print_taskqueue_stats_hdr(outputStream* const st) {
3664 st->print_raw_cr("GC Task Stats");
3665 st->print_raw("thr "); TaskQueueStats::print_header(1, st); st->cr();
3666 st->print_raw("--- "); TaskQueueStats::print_header(2, st); st->cr();
3667 }
3668
3669 void G1CollectedHeap::print_taskqueue_stats(outputStream* const st) const {
3670 print_taskqueue_stats_hdr(st);
3671
3672 TaskQueueStats totals;
3673 const int n = workers()->total_workers();
3674 for (int i = 0; i < n; ++i) {
3675 st->print("%3d ", i); task_queue(i)->stats.print(st); st->cr();
3676 totals += task_queue(i)->stats;
3677 }
3678 st->print_raw("tot "); totals.print(st); st->cr();
3679
3680 DEBUG_ONLY(totals.verify());
3681 }
3682
3683 void G1CollectedHeap::reset_taskqueue_stats() {
3684 const int n = workers()->total_workers();
3685 for (int i = 0; i < n; ++i) {
3686 task_queue(i)->stats.reset();
3687 }
3688 }
3689 #endif // TASKQUEUE_STATS
3690
3691 void G1CollectedHeap::log_gc_header() {
3692 if (!G1Log::fine()) {
3693 return;
3694 }
3695
3696 gclog_or_tty->gclog_stamp(_gc_tracer_stw->gc_id());
3697
3698 GCCauseString gc_cause_str = GCCauseString("GC pause", gc_cause())
3699 .append(g1_policy()->gcs_are_young() ? "(young)" : "(mixed)")
3700 .append(g1_policy()->during_initial_mark_pause() ? " (initial-mark)" : "");
3701
3702 gclog_or_tty->print("[%s", (const char*)gc_cause_str);
3703 }
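// Illustrative output of the header above (the exact cause text
// depends on the GCCause; the matching log footer later appends
// the pause timings):
//
//   [GC pause (G1 Evacuation Pause) (young) (initial-mark)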
3704
3762 // Record whether this pause is an initial mark. When the current
3763 // thread has completed its logging output and it's safe to signal
3764 // the CM thread, the flag's value in the policy has been reset.
3765 bool should_start_conc_mark = g1_policy()->during_initial_mark_pause();
3766
3767 // Inner scope for scope-based logging, timers, and stats collection
3768 {
3769 EvacuationInfo evacuation_info;
3770
3771 if (g1_policy()->during_initial_mark_pause()) {
3772 // We are about to start a marking cycle, so we increment the
3773 // counter of old marking cycles started.
3774 increment_old_marking_cycles_started();
3775 register_concurrent_cycle_start(_gc_timer_stw->gc_start());
3776 }
3777
3778 _gc_tracer_stw->report_yc_type(yc_type());
3779
3780 TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
3781
3782 int active_workers = workers()->active_workers();
3783 double pause_start_sec = os::elapsedTime();
3784 g1_policy()->phase_times()->note_gc_start(active_workers);
3785 log_gc_header();
3786
3787 TraceCollectorStats tcs(g1mm()->incremental_collection_counters());
3788 TraceMemoryManagerStats tms(false /* fullGC */, gc_cause());
3789
3790 // If the secondary_free_list is not empty, append it to the
3791 // free_list. No need to wait for the cleanup operation to finish;
3792 // the region allocation code will check the secondary_free_list
3793 // and wait if necessary. If the G1StressConcRegionFreeing flag is
3794 // set, skip this step so that the region allocation code has to
3795 // get entries from the secondary_free_list.
3796 if (!G1StressConcRegionFreeing) {
3797 append_secondary_free_list_if_not_empty_with_lock();
3798 }
3799
3800 assert(check_young_list_well_formed(), "young list should be well formed");
3801
3802 // Don't dynamically change the number of GC threads this early. A value of
4756
4757 g1_rem_set()->oops_into_collection_set_do(scan_rs, &scavenge_cs_nmethods, worker_i);
4758
4759 _process_strong_tasks->all_tasks_completed();
4760 }
4761
4762 class G1StringSymbolTableUnlinkTask : public AbstractGangTask {
4763 private:
4764 BoolObjectClosure* _is_alive;
4765 int _initial_string_table_size;
4766 int _initial_symbol_table_size;
4767
4768 bool _process_strings;
4769 int _strings_processed;
4770 int _strings_removed;
4771
4772 bool _process_symbols;
4773 int _symbols_processed;
4774 int _symbols_removed;
4775
4776 public:
4777 G1StringSymbolTableUnlinkTask(BoolObjectClosure* is_alive, bool process_strings, bool process_symbols) :
4778 AbstractGangTask("String/Symbol Unlinking"),
4779 _is_alive(is_alive),
4780 _process_strings(process_strings), _strings_processed(0), _strings_removed(0),
4781 _process_symbols(process_symbols), _symbols_processed(0), _symbols_removed(0) {
4782
4783 _initial_string_table_size = StringTable::the_table()->table_size();
4784 _initial_symbol_table_size = SymbolTable::the_table()->table_size();
4785 if (process_strings) {
4786 StringTable::clear_parallel_claimed_index();
4787 }
4788 if (process_symbols) {
4789 SymbolTable::clear_parallel_claimed_index();
4790 }
4791 }
4792
4793 ~G1StringSymbolTableUnlinkTask() {
4794 guarantee(!_process_strings || StringTable::parallel_claimed_index() >= _initial_string_table_size,
4795 err_msg("claim value %d after unlink less than initial string table size %d",
4796 StringTable::parallel_claimed_index(), _initial_string_table_size));
4797 guarantee(!_process_symbols || SymbolTable::parallel_claimed_index() >= _initial_symbol_table_size,
4798 err_msg("claim value %d after unlink less than initial symbol table size %d",
4799 SymbolTable::parallel_claimed_index(), _initial_symbol_table_size));
4800
4801 if (G1TraceStringSymbolTableScrubbing) {
4802 gclog_or_tty->print_cr("Cleaned string and symbol table, "
4803 "strings: "SIZE_FORMAT" processed, "SIZE_FORMAT" removed, "
4804 "symbols: "SIZE_FORMAT" processed, "SIZE_FORMAT" removed",
4805 strings_processed(), strings_removed(),
4806 symbols_processed(), symbols_removed());
4807 }
4808 }
4809
4810 void work(uint worker_id) {
4811 int strings_processed = 0;
4812 int strings_removed = 0;
4813 int symbols_processed = 0;
4814 int symbols_removed = 0;
4815 if (_process_strings) {
4816 StringTable::possibly_parallel_unlink(_is_alive, &strings_processed, &strings_removed);
4817 Atomic::add(strings_processed, &_strings_processed);
4818 Atomic::add(strings_removed, &_strings_removed);
4819 }
4820 if (_process_symbols) {
4821 SymbolTable::possibly_parallel_unlink(&symbols_processed, &symbols_removed);
4822 Atomic::add(symbols_processed, &_symbols_processed);
4823 Atomic::add(symbols_removed, &_symbols_removed);
4824 }
4825 }
4826
4827 size_t strings_processed() const { return (size_t)_strings_processed; }
4828 size_t strings_removed() const { return (size_t)_strings_removed; }
4829
4830 size_t symbols_processed() const { return (size_t)_symbols_processed; }
4831 size_t symbols_removed() const { return (size_t)_symbols_removed; }
4832 };
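// Typical (illustrative) use of the task above: constructing it resets
// the tables' parallel claim indices, the work gang runs it, and the
// destructor verifies that every bucket was claimed:
//
//   G1StringSymbolTableUnlinkTask task(is_alive, true, true);
//   set_par_threads(workers()->active_workers());
//   workers()->run_task(&task);
//   set_par_threads(0);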
4833
4834 class G1CodeCacheUnloadingTask VALUE_OBJ_CLASS_SPEC {
4835 private:
4836 static Monitor* _lock;
4837
4838 BoolObjectClosure* const _is_alive;
4839 const bool _unloading_occurred;
4840 const uint _num_workers;
4841
4842 // Variables used to claim nmethods.
4843 nmethod* _first_nmethod;
4844 volatile nmethod* _claimed_nmethod;
5085 // Clean the Strings and Symbols.
5086 _string_symbol_task.work(worker_id);
5087
5088 // Wait for all workers to finish the first code cache cleaning pass.
5089 _code_cache_task.barrier_wait(worker_id);
5090
5091 // Do the second code cache cleaning pass, which relies on
5092 // the liveness information gathered during the first pass.
5093 _code_cache_task.work_second_pass(worker_id);
5094
5095 // Clean all klasses that were not unloaded.
5096 _klass_cleaning_task.work();
5097 }
5098 };
5099
5100
5101 void G1CollectedHeap::parallel_cleaning(BoolObjectClosure* is_alive,
5102 bool process_strings,
5103 bool process_symbols,
5104 bool class_unloading_occurred) {
5105 uint n_workers = workers()->active_workers();
5106
5107 G1ParallelCleaningTask g1_unlink_task(is_alive, process_strings, process_symbols,
5108 n_workers, class_unloading_occurred);
5109 set_par_threads(n_workers);
5110 workers()->run_task(&g1_unlink_task);
5111 set_par_threads(0);
5112 }
5113
5114 void G1CollectedHeap::unlink_string_and_symbol_table(BoolObjectClosure* is_alive,
5115 bool process_strings, bool process_symbols) {
5116 {
5117 uint n_workers = _g1h->workers()->active_workers();
5118 G1StringSymbolTableUnlinkTask g1_unlink_task(is_alive, process_strings, process_symbols);
5119 set_par_threads(n_workers);
5120 workers()->run_task(&g1_unlink_task);
5121 set_par_threads(0);
5122 }
5123
5124 if (G1StringDedup::is_enabled()) {
5125 G1StringDedup::unlink(is_alive);
5126 }
5127 }
5128
5129 class G1RedirtyLoggedCardsTask : public AbstractGangTask {
5130 private:
5131 DirtyCardQueueSet* _queue;
5132 public:
5133 G1RedirtyLoggedCardsTask(DirtyCardQueueSet* queue) : AbstractGangTask("Redirty Cards"), _queue(queue) { }
5134
5135 virtual void work(uint worker_id) {
5136 double start_time = os::elapsedTime();
5137
5138 RedirtyLoggedCardTableEntryClosure cl;
5139 _queue->par_apply_closure_to_all_completed_buffers(&cl);
5140
5141 G1GCPhaseTimes* timer = G1CollectedHeap::heap()->g1_policy()->phase_times();
5142 timer->record_redirty_logged_cards_time_ms(worker_id, (os::elapsedTime() - start_time) * 1000.0);
5143 timer->record_redirty_logged_cards_processed_cards(worker_id, cl.num_processed());
5144 }
5145 };
5146
5147 void G1CollectedHeap::redirty_logged_cards() {
5148 double redirty_logged_cards_start = os::elapsedTime();
5149
5150 uint n_workers = _g1h->workers()->active_workers();
5151
5152 G1RedirtyLoggedCardsTask redirty_task(&dirty_card_queue_set());
5153 dirty_card_queue_set().reset_for_par_iteration();
5154 set_par_threads(n_workers);
5155 workers()->run_task(&redirty_task);
5156 set_par_threads(0);
5157
5158 DirtyCardQueueSet& dcq = JavaThread::dirty_card_queue_set();
5159 dcq.merge_bufferlists(&dirty_card_queue_set());
5160 assert(dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed");
5161
5162 g1_policy()->phase_times()->record_redirty_logged_cards_time_ms((os::elapsedTime() - redirty_logged_cards_start) * 1000.0);
5163 }
5164
5165 // Weak Reference Processing support
5166
5167 // An always "is_alive" closure that is used to preserve referents.
5168 // If the object is non-null then it's alive. Used in the preservation
5169 // of referent objects that are pointed to by reference objects
5170 // discovered by the CM ref processor.
5171 class G1AlwaysAliveClosure: public BoolObjectClosure {
5172 G1CollectedHeap* _g1;
5173 public:
5174 G1AlwaysAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
5175 bool do_object_b(oop p) {
5176 if (p != NULL) {
5526 // by the CM ref processor should have already been copied (either by
5527 // applying the external root copy closure to the discovered lists, or
5528 // by following an RSet entry).
5529 //
5530 // But some of the referents that these reference objects point to,
5531 // and that are in the collection set, may not have been copied: the STW ref
5532 // processor would have seen that the reference object had already
5533 // been 'discovered' and would have skipped discovering the reference,
5534 // but would not have treated the reference object as a regular oop.
5535 // As a result the copy closure would not have been applied to the
5536 // referent object.
5537 //
5538 // We need to explicitly copy these referent objects - the references
5539 // will be processed at the end of remarking.
5540 //
5541 // We also need to do this copying before we process the reference
5542 // objects discovered by the STW ref processor in case one of these
5543 // referents points to another object which is also referenced by an
5544 // object discovered by the STW ref processor.
5545
5546 assert(no_of_gc_workers == workers()->active_workers(), "Need to reset active GC workers");
5547
5548 set_par_threads(no_of_gc_workers);
5549 G1ParPreserveCMReferentsTask keep_cm_referents(this,
5550 no_of_gc_workers,
5551 _task_queues);
5552
5553 workers()->run_task(&keep_cm_referents);
5554
5555 set_par_threads(0);
5556
5557 // Closure to test whether a referent is alive.
5558 G1STWIsAliveClosure is_alive(this);
5559
5560 // Even when parallel reference processing is enabled, the processing
5561 // of JNI refs is serial and is performed by the current thread
5562 // rather than by a worker. The following PSS will be used for processing
5563 // JNI refs.
5564
5565 // Use only a single queue for this PSS.
5566 G1ParScanThreadState pss(this, 0, NULL);
5567
5568 // We do not embed a reference processor in the copying/scanning
5569 // closures while we're actually processing the discovered
5570 // reference objects.
5571 G1ParScanHeapEvacFailureClosure evac_failure_cl(this, &pss, NULL);
5572
5573 pss.set_evac_failure_closure(&evac_failure_cl);
5660
5661 double ref_enq_time = os::elapsedTime() - ref_enq_start;
5662 g1_policy()->phase_times()->record_ref_enq_time(ref_enq_time * 1000.0);
5663 }
5664
5665 void G1CollectedHeap::evacuate_collection_set(EvacuationInfo& evacuation_info) {
5666 _expand_heap_after_alloc_failure = true;
5667 _evacuation_failed = false;
5668
5669 // Should G1EvacuationFailureALot be in effect for this GC?
5670 NOT_PRODUCT(set_evacuation_failure_alot_for_current_gc();)
5671
5672 g1_rem_set()->prepare_for_oops_into_collection_set_do();
5673
5674 // Disable the hot card cache.
5675 G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache();
5676 hot_card_cache->reset_hot_cache_claimed_index();
5677 hot_card_cache->set_use_cache(false);
5678
5679 uint n_workers;
5680 n_workers =
5681 AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
5682 workers()->active_workers(),
5683 Threads::number_of_non_daemon_threads());
5684 assert(UseDynamicNumberOfGCThreads ||
5685 n_workers == workers()->total_workers(),
5686 "If not dynamic should be using all the workers");
5687 workers()->set_active_workers(n_workers);
5688 set_par_threads(n_workers);
5689
5690 G1ParTask g1_par_task(this, _task_queues);
5691
5692 init_for_evac_failure(NULL);
5693
5694 rem_set()->prepare_for_younger_refs_iterate(true);
5695
5696 assert(dirty_card_queue_set().completed_buffers_num() == 0, "Should be empty");
5697 double start_par_time_sec = os::elapsedTime();
5698 double end_par_time_sec;
5699
5700 {
5701 StrongRootsScope srs(this);
5702 // InitialMark needs claim bits to keep track of the marked-through CLDs.
5703 if (g1_policy()->during_initial_mark_pause()) {
5704 ClassLoaderDataGraph::clear_claimed_marks();
5705 }
5706
5707 // The individual threads will set their evac-failure closures.
5708 if (ParallelGCVerbose) G1ParScanThreadState::print_termination_stats_hdr();
5709 // These tasks use SharedHeap::_process_strong_tasks
5710 assert(UseDynamicNumberOfGCThreads ||
5711 workers()->active_workers() == workers()->total_workers(),
5712 "If not dynamic should be using all the workers");
5713 workers()->run_task(&g1_par_task);
5714 end_par_time_sec = os::elapsedTime();
5715
5716 // Closing the inner scope will execute the destructor
5717 // for the StrongRootsScope object. We record the current
5718 // elapsed time before closing the scope so that time
5719 // taken for the SRS destructor is NOT included in the
5720 // reported parallel time.
5721 }
5722
5723 double par_time_ms = (end_par_time_sec - start_par_time_sec) * 1000.0;
5724 g1_policy()->phase_times()->record_par_time(par_time_ms);
5725
5726 double code_root_fixup_time_ms =
5727 (os::elapsedTime() - end_par_time_sec) * 1000.0;
5728 g1_policy()->phase_times()->record_code_root_fixup_time(code_root_fixup_time_ms);
5729
5730 set_par_threads(0);
5731
5732 // Process any discovered reference objects - we have
5733 // to do this _before_ we retire the GC alloc regions
5999 }
6000 };
6001
6002 void G1CollectedHeap::check_bitmaps(const char* caller) {
6003 if (!G1VerifyBitmaps) return;
6004
6005 G1VerifyBitmapClosure cl(caller, this);
6006 heap_region_iterate(&cl);
6007 guarantee(!cl.failures(), "bitmap verification");
6008 }
6009 #endif // PRODUCT
6010
6011 void G1CollectedHeap::cleanUpCardTable() {
6012 G1SATBCardTableModRefBS* ct_bs = g1_barrier_set();
6013 double start = os::elapsedTime();
6014
6015 {
6016 // Iterate over the dirty cards region list.
6017 G1ParCleanupCTTask cleanup_task(ct_bs, this);
6018
6019 set_par_threads();
6020 workers()->run_task(&cleanup_task);
6021 set_par_threads(0);
6022 #ifndef PRODUCT
6023 if (G1VerifyCTCleanup || VerifyAfterGC) {
6024 G1VerifyCardTableCleanup cleanup_verifier(this, ct_bs);
6025 heap_region_iterate(&cleanup_verifier);
6026 }
6027 #endif
6028 }
6029
6030 double elapsed = os::elapsedTime() - start;
6031 g1_policy()->phase_times()->record_clear_ct_time(elapsed * 1000.0);
6032 }
6033
6034 void G1CollectedHeap::free_collection_set(HeapRegion* cs_head, EvacuationInfo& evacuation_info) {
6035 size_t pre_used = 0;
6036 FreeRegionList local_free_list("Local List for CSet Freeing");
6037
6038 double young_time_ms = 0.0;
6039 double non_young_time_ms = 0.0;
6040
6041 // Since the collection set is a superset of the young list,
6538 return NULL;
6539 }
6540
6541 void G1CollectedHeap::retire_mutator_alloc_region(HeapRegion* alloc_region,
6542 size_t allocated_bytes) {
6543 assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
6544 assert(alloc_region->is_eden(), "all mutator alloc regions should be eden");
6545
6546 g1_policy()->add_region_to_incremental_cset_lhs(alloc_region);
6547 _allocator->increase_used(allocated_bytes);
6548 _hr_printer.retire(alloc_region);
6549 // We update the eden sizes here, when the region is retired,
6550 // instead of when it's allocated, since this is the point at which its
6551 // used space has been recorded in _summary_bytes_used.
6552 g1mm()->update_eden_size();
6553 }
6554
6555 void G1CollectedHeap::set_par_threads() {
6556 // Don't change the number of workers. Use the value previously set
6557 // in the workgroup.
6558 uint n_workers = workers()->active_workers();
6559 assert(UseDynamicNumberOfGCThreads ||
6560 n_workers == workers()->total_workers(),
6561 "Otherwise should be using the total number of workers");
6562 if (n_workers == 0) {
6563 assert(false, "Should have been set in prior evacuation pause.");
6564 n_workers = ParallelGCThreads;
6565 workers()->set_active_workers(n_workers);
6566 }
6567 set_par_threads(n_workers);
6568 }
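// Typical (illustrative) bracketing of a parallel phase with the helper
// above, mirroring cleanUpCardTable():
//
//   set_par_threads();             // adopt the workgroup's active count
//   workers()->run_task(&some_task);
//   set_par_threads(0);            // always reset before serial code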
6569
6570 // Methods for the GC alloc regions
6571
6572 HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size,
6573 uint count,
6574 GCAllocPurpose ap) {
6575 assert(FreeList_lock->owned_by_self(), "pre-condition");
6576
6577 if (count < g1_policy()->max_regions(ap)) {
|