8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "classfile/metadataOnStackMark.hpp"
27 #include "classfile/stringTable.hpp"
28 #include "classfile/symbolTable.hpp"
29 #include "code/codeCache.hpp"
30 #include "code/icBuffer.hpp"
31 #include "gc/g1/g1Allocator.inline.hpp"
32 #include "gc/g1/g1BarrierSet.hpp"
33 #include "gc/g1/g1CollectedHeap.inline.hpp"
34 #include "gc/g1/g1CollectionSet.hpp"
35 #include "gc/g1/g1CollectorPolicy.hpp"
36 #include "gc/g1/g1CollectorState.hpp"
37 #include "gc/g1/g1ConcurrentRefine.hpp"
38 #include "gc/g1/g1ConcurrentRefineThread.hpp"
39 #include "gc/g1/g1ConcurrentMarkThread.inline.hpp"
40 #include "gc/g1/g1EvacStats.inline.hpp"
41 #include "gc/g1/g1FullCollector.hpp"
42 #include "gc/g1/g1GCPhaseTimes.hpp"
43 #include "gc/g1/g1HeapSizingPolicy.hpp"
44 #include "gc/g1/g1HeapTransition.hpp"
45 #include "gc/g1/g1HeapVerifier.hpp"
46 #include "gc/g1/g1HotCardCache.hpp"
47 #include "gc/g1/g1MemoryPool.hpp"
48 #include "gc/g1/g1OopClosures.inline.hpp"
3218 }
3219
3220 void G1CollectedHeap::print_termination_stats(uint worker_id,
3221 double elapsed_ms,
3222 double strong_roots_ms,
3223 double term_ms,
3224 size_t term_attempts,
3225 size_t alloc_buffer_waste,
3226 size_t undo_waste) const {
3227 log_debug(gc, task, stats)
3228 ("%3d %9.2f %9.2f %6.2f "
3229 "%9.2f %6.2f " SIZE_FORMAT_W(8) " "
3230 SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7),
3231 worker_id, elapsed_ms, strong_roots_ms, strong_roots_ms * 100 / elapsed_ms,
3232 term_ms, term_ms * 100 / elapsed_ms, term_attempts,
3233 (alloc_buffer_waste + undo_waste) * HeapWordSize / K,
3234 alloc_buffer_waste * HeapWordSize / K,
3235 undo_waste * HeapWordSize / K);
3236 }
3237
// Parallel gang task that unlinks dead entries from the interned string
// table, the symbol table, and (optionally) the string deduplication
// structures. Each kind of cleaning is enabled independently via the
// constructor flags; workers cooperate through parallel claiming and
// accumulate per-worker counts into the shared totals atomically.
class G1StringAndSymbolCleaningTask : public AbstractGangTask {
private:
  BoolObjectClosure* _is_alive;                      // Liveness oracle used to decide which strings stay.
  G1StringDedupUnlinkOrOopsDoClosure _dedup_closure; // Closure applied to the dedup queues/table.
  OopStorage::ParState<false /* concurrent */, false /* const */> _par_state_string;

  int _initial_string_table_size;
  int _initial_symbol_table_size;

  bool _process_strings;
  int _strings_processed;  // Shared totals; updated with Atomic::add from work().
  int _strings_removed;

  bool _process_symbols;
  int _symbols_processed;  // Shared totals; updated with Atomic::add from work().
  int _symbols_removed;

  bool _process_string_dedup;

public:
  G1StringAndSymbolCleaningTask(BoolObjectClosure* is_alive, bool process_strings, bool process_symbols, bool process_string_dedup) :
    AbstractGangTask("String/Symbol Unlinking"),
    _is_alive(is_alive),
    _dedup_closure(is_alive, NULL, false),
    _par_state_string(StringTable::weak_storage()),
    _process_strings(process_strings), _strings_processed(0), _strings_removed(0),
    _process_symbols(process_symbols), _symbols_processed(0), _symbols_removed(0),
    _process_string_dedup(process_string_dedup) {

    _initial_string_table_size = (int) StringTable::the_table()->table_size();
    _initial_symbol_table_size = SymbolTable::the_table()->table_size();
    if (process_symbols) {
      // Reset the claim cursor so workers can claim symbol table chunks from 0.
      SymbolTable::clear_parallel_claimed_index();
    }
    if (process_strings) {
      StringTable::reset_dead_counter();
    }
  }

  ~G1StringAndSymbolCleaningTask() {
    // Sanity check: when symbols were processed, the parallel claim index
    // must have advanced past the entire (initial) symbol table.
    guarantee(!_process_symbols || SymbolTable::parallel_claimed_index() >= _initial_symbol_table_size,
              "claim value %d after unlink less than initial symbol table size %d",
              SymbolTable::parallel_claimed_index(), _initial_symbol_table_size);

    log_info(gc, stringtable)(
        "Cleaned string and symbol table, "
        "strings: " SIZE_FORMAT " processed, " SIZE_FORMAT " removed, "
        "symbols: " SIZE_FORMAT " processed, " SIZE_FORMAT " removed",
        strings_processed(), strings_removed(),
        symbols_processed(), symbols_removed());
    if (_process_strings) {
      StringTable::finish_dead_counter();
    }
  }

  void work(uint worker_id) {
    // Per-worker counters; merged into the shared totals atomically below.
    int strings_processed = 0;
    int strings_removed = 0;
    int symbols_processed = 0;
    int symbols_removed = 0;
    if (_process_strings) {
      StringTable::possibly_parallel_unlink(&_par_state_string, _is_alive, &strings_processed, &strings_removed);
      Atomic::add(strings_processed, &_strings_processed);
      Atomic::add(strings_removed, &_strings_removed);
    }
    if (_process_symbols) {
      SymbolTable::possibly_parallel_unlink(&symbols_processed, &symbols_removed);
      Atomic::add(symbols_processed, &_symbols_processed);
      Atomic::add(symbols_removed, &_symbols_removed);
    }
    if (_process_string_dedup) {
      G1StringDedup::parallel_unlink(&_dedup_closure, worker_id);
    }
  }

  // Totals are only meaningful after all workers have finished.
  size_t strings_processed() const { return (size_t)_strings_processed; }
  size_t strings_removed() const { return (size_t)_strings_removed; }

  size_t symbols_processed() const { return (size_t)_symbols_processed; }
  size_t symbols_removed() const { return (size_t)_symbols_removed; }
};
3319
3320 class G1CodeCacheUnloadingTask {
3321 private:
3322 static Monitor* _lock;
3323
3324 BoolObjectClosure* const _is_alive;
3325 const bool _unloading_occurred;
3326 const uint _num_workers;
3327
3328 // Variables used to claim nmethods.
3329 CompiledMethod* _first_nmethod;
3330 CompiledMethod* volatile _claimed_nmethod;
3331
3332 // The list of nmethods that need to be processed by the second pass.
3333 CompiledMethod* volatile _postponed_list;
3334 volatile uint _num_entered_barrier;
3335
3336 public:
3337 G1CodeCacheUnloadingTask(uint num_workers, BoolObjectClosure* is_alive, bool unloading_occurred) :
3547 bool claim_resolved_method_task() {
3548 if (_resolved_method_task_claimed) {
3549 return false;
3550 }
3551 return Atomic::cmpxchg(1, &_resolved_method_task_claimed, 0) == 0;
3552 }
3553
3554 // These aren't big, one thread can do it all.
3555 void work() {
3556 if (claim_resolved_method_task()) {
3557 ResolvedMethodTable::unlink();
3558 }
3559 }
3560 };
3561
3562
3563 // To minimize the remark pause times, the tasks below are done in parallel.
3564 class G1ParallelCleaningTask : public AbstractGangTask {
3565 private:
3566 bool _unloading_occurred;
3567 G1StringAndSymbolCleaningTask _string_symbol_task;
3568 G1CodeCacheUnloadingTask _code_cache_task;
3569 G1KlassCleaningTask _klass_cleaning_task;
3570 G1ResolvedMethodCleaningTask _resolved_method_cleaning_task;
3571
3572 public:
3573 // The constructor is run in the VMThread.
3574 G1ParallelCleaningTask(BoolObjectClosure* is_alive, uint num_workers, bool unloading_occurred) :
3575 AbstractGangTask("Parallel Cleaning"),
3576 _string_symbol_task(is_alive, true, true, G1StringDedup::is_enabled()),
3577 _code_cache_task(num_workers, is_alive, unloading_occurred),
3578 _klass_cleaning_task(),
3579 _unloading_occurred(unloading_occurred),
3580 _resolved_method_cleaning_task() {
3581 }
3582
3583 // The parallel work done by all worker threads.
3584 void work(uint worker_id) {
3585 // Do first pass of code cache cleaning.
3586 _code_cache_task.work_first_pass(worker_id);
3587
3588 // Let the threads mark that the first pass is done.
3589 _code_cache_task.barrier_mark(worker_id);
3590
3591 // Clean the Strings and Symbols.
3592 _string_symbol_task.work(worker_id);
3593
3594 // Clean unreferenced things in the ResolvedMethodTable
3595 _resolved_method_cleaning_task.work();
3596
3597 // Wait for all workers to finish the first code cache cleaning pass.
3598 _code_cache_task.barrier_wait(worker_id);
3599
3600 // Do the second code cache cleaning work, which realize on
3601 // the liveness information gathered during the first pass.
3602 _code_cache_task.work_second_pass(worker_id);
3603
3604 // Clean all klasses that were not unloaded.
3605 // The weak metadata in klass doesn't need to be
3606 // processed if there was no unloading.
3607 if (_unloading_occurred) {
3608 _klass_cleaning_task.work();
3609 }
3610 }
3611 };
3612
3613
3614 void G1CollectedHeap::complete_cleaning(BoolObjectClosure* is_alive,
3615 bool class_unloading_occurred) {
3616 uint n_workers = workers()->active_workers();
3617
3618 G1ParallelCleaningTask g1_unlink_task(is_alive, n_workers, class_unloading_occurred);
3619 workers()->run_task(&g1_unlink_task);
3620 }
3621
3622 void G1CollectedHeap::partial_cleaning(BoolObjectClosure* is_alive,
3623 bool process_strings,
3624 bool process_symbols,
3625 bool process_string_dedup) {
3626 if (!process_strings && !process_symbols && !process_string_dedup) {
3627 // Nothing to clean.
3628 return;
3629 }
3630
3631 G1StringAndSymbolCleaningTask g1_unlink_task(is_alive, process_strings, process_symbols, process_string_dedup);
3632 workers()->run_task(&g1_unlink_task);
3633
3634 }
3635
3636 class G1RedirtyLoggedCardsTask : public AbstractGangTask {
3637 private:
3638 DirtyCardQueueSet* _queue;
3639 G1CollectedHeap* _g1h;
3640 public:
3641 G1RedirtyLoggedCardsTask(DirtyCardQueueSet* queue, G1CollectedHeap* g1h) : AbstractGangTask("Redirty Cards"),
3642 _queue(queue), _g1h(g1h) { }
3643
3644 virtual void work(uint worker_id) {
3645 G1GCPhaseTimes* phase_times = _g1h->g1_policy()->phase_times();
3646 G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::RedirtyCards, worker_id);
3647
3648 RedirtyLoggedCardTableEntryClosure cl(_g1h);
3649 _queue->par_apply_closure_to_all_completed_buffers(&cl);
3650
3651 phase_times->record_thread_work_item(G1GCPhaseTimes::RedirtyCards, worker_id, cl.num_dirtied());
3652 }
3653 };
4007 phase_times->record_par_time(par_time_ms);
4008
4009 double code_root_fixup_time_ms =
4010 (os::elapsedTime() - end_par_time_sec) * 1000.0;
4011 phase_times->record_code_root_fixup_time(code_root_fixup_time_ms);
4012 }
4013
4014 void G1CollectedHeap::post_evacuate_collection_set(EvacuationInfo& evacuation_info, G1ParScanThreadStateSet* per_thread_states) {
4015 // Also cleans the card table from temporary duplicate detection information used
4016 // during UpdateRS/ScanRS.
4017 g1_rem_set()->cleanup_after_oops_into_collection_set_do();
4018
4019 // Process any discovered reference objects - we have
4020 // to do this _before_ we retire the GC alloc regions
4021 // as we may have to copy some 'reachable' referent
4022 // objects (and their reachable sub-graphs) that were
4023 // not copied during the pause.
4024 process_discovered_references(per_thread_states);
4025
4026 // FIXME
4027 // CM's reference processing also cleans up the string and symbol tables.
4028 // Should we do that here also? We could, but it is a serial operation
4029 // and could significantly increase the pause time.
4030
4031 G1STWIsAliveClosure is_alive(this);
4032 G1KeepAliveClosure keep_alive(this);
4033
4034 {
4035 double start = os::elapsedTime();
4036
4037 WeakProcessor::weak_oops_do(&is_alive, &keep_alive);
4038
4039 double time_ms = (os::elapsedTime() - start) * 1000.0;
4040 g1_policy()->phase_times()->record_weak_ref_proc_time(time_ms);
4041 }
4042
4043 if (G1StringDedup::is_enabled()) {
4044 double fixup_start = os::elapsedTime();
4045
4046 G1StringDedup::unlink_or_oops_do(&is_alive, &keep_alive, true, g1_policy()->phase_times());
4047
|
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "classfile/metadataOnStackMark.hpp"
27 #include "classfile/stringTable.hpp"
28 #include "code/codeCache.hpp"
29 #include "code/icBuffer.hpp"
30 #include "gc/g1/g1Allocator.inline.hpp"
31 #include "gc/g1/g1BarrierSet.hpp"
32 #include "gc/g1/g1CollectedHeap.inline.hpp"
33 #include "gc/g1/g1CollectionSet.hpp"
34 #include "gc/g1/g1CollectorPolicy.hpp"
35 #include "gc/g1/g1CollectorState.hpp"
36 #include "gc/g1/g1ConcurrentRefine.hpp"
37 #include "gc/g1/g1ConcurrentRefineThread.hpp"
38 #include "gc/g1/g1ConcurrentMarkThread.inline.hpp"
39 #include "gc/g1/g1EvacStats.inline.hpp"
40 #include "gc/g1/g1FullCollector.hpp"
41 #include "gc/g1/g1GCPhaseTimes.hpp"
42 #include "gc/g1/g1HeapSizingPolicy.hpp"
43 #include "gc/g1/g1HeapTransition.hpp"
44 #include "gc/g1/g1HeapVerifier.hpp"
45 #include "gc/g1/g1HotCardCache.hpp"
46 #include "gc/g1/g1MemoryPool.hpp"
47 #include "gc/g1/g1OopClosures.inline.hpp"
3217 }
3218
3219 void G1CollectedHeap::print_termination_stats(uint worker_id,
3220 double elapsed_ms,
3221 double strong_roots_ms,
3222 double term_ms,
3223 size_t term_attempts,
3224 size_t alloc_buffer_waste,
3225 size_t undo_waste) const {
3226 log_debug(gc, task, stats)
3227 ("%3d %9.2f %9.2f %6.2f "
3228 "%9.2f %6.2f " SIZE_FORMAT_W(8) " "
3229 SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7),
3230 worker_id, elapsed_ms, strong_roots_ms, strong_roots_ms * 100 / elapsed_ms,
3231 term_ms, term_ms * 100 / elapsed_ms, term_attempts,
3232 (alloc_buffer_waste + undo_waste) * HeapWordSize / K,
3233 alloc_buffer_waste * HeapWordSize / K,
3234 undo_waste * HeapWordSize / K);
3235 }
3236
// Parallel gang task that unlinks dead entries from the interned string
// table and (optionally) the string deduplication structures. Each kind
// of cleaning is enabled independently via the constructor flags; workers
// accumulate per-worker counts into the shared totals atomically.
class G1StringCleaningTask : public AbstractGangTask {
private:
  BoolObjectClosure* _is_alive;                      // Liveness oracle used to decide which strings stay.
  G1StringDedupUnlinkOrOopsDoClosure _dedup_closure; // Closure applied to the dedup queues/table.
  OopStorage::ParState<false /* concurrent */, false /* const */> _par_state_string;

  int _initial_string_table_size;

  bool _process_strings;
  int _strings_processed;  // Shared totals; updated with Atomic::add from work().
  int _strings_removed;

  bool _process_string_dedup;

public:
  G1StringCleaningTask(BoolObjectClosure* is_alive, bool process_strings, bool process_string_dedup) :
    AbstractGangTask("String Unlinking"),
    _is_alive(is_alive),
    _dedup_closure(is_alive, NULL, false),
    _par_state_string(StringTable::weak_storage()),
    _process_strings(process_strings), _strings_processed(0), _strings_removed(0),
    _process_string_dedup(process_string_dedup) {

    _initial_string_table_size = (int) StringTable::the_table()->table_size();
    if (process_strings) {
      StringTable::reset_dead_counter();
    }
  }

  ~G1StringCleaningTask() {
    log_info(gc, stringtable)(
        "Cleaned string table, "
        "strings: " SIZE_FORMAT " processed, " SIZE_FORMAT " removed",
        strings_processed(), strings_removed());
    if (_process_strings) {
      StringTable::finish_dead_counter();
    }
  }

  void work(uint worker_id) {
    // Per-worker counters; merged into the shared totals atomically below.
    int strings_processed = 0;
    int strings_removed = 0;
    if (_process_strings) {
      StringTable::possibly_parallel_unlink(&_par_state_string, _is_alive, &strings_processed, &strings_removed);
      Atomic::add(strings_processed, &_strings_processed);
      Atomic::add(strings_removed, &_strings_removed);
    }
    if (_process_string_dedup) {
      G1StringDedup::parallel_unlink(&_dedup_closure, worker_id);
    }
  }

  // Totals are only meaningful after all workers have finished.
  size_t strings_processed() const { return (size_t)_strings_processed; }
  size_t strings_removed() const { return (size_t)_strings_removed; }
};
3292
3293 class G1CodeCacheUnloadingTask {
3294 private:
3295 static Monitor* _lock;
3296
3297 BoolObjectClosure* const _is_alive;
3298 const bool _unloading_occurred;
3299 const uint _num_workers;
3300
3301 // Variables used to claim nmethods.
3302 CompiledMethod* _first_nmethod;
3303 CompiledMethod* volatile _claimed_nmethod;
3304
3305 // The list of nmethods that need to be processed by the second pass.
3306 CompiledMethod* volatile _postponed_list;
3307 volatile uint _num_entered_barrier;
3308
3309 public:
3310 G1CodeCacheUnloadingTask(uint num_workers, BoolObjectClosure* is_alive, bool unloading_occurred) :
3520 bool claim_resolved_method_task() {
3521 if (_resolved_method_task_claimed) {
3522 return false;
3523 }
3524 return Atomic::cmpxchg(1, &_resolved_method_task_claimed, 0) == 0;
3525 }
3526
3527 // These aren't big, one thread can do it all.
3528 void work() {
3529 if (claim_resolved_method_task()) {
3530 ResolvedMethodTable::unlink();
3531 }
3532 }
3533 };
3534
3535
3536 // To minimize the remark pause times, the tasks below are done in parallel.
3537 class G1ParallelCleaningTask : public AbstractGangTask {
3538 private:
3539 bool _unloading_occurred;
3540 G1StringCleaningTask _string_task;
3541 G1CodeCacheUnloadingTask _code_cache_task;
3542 G1KlassCleaningTask _klass_cleaning_task;
3543 G1ResolvedMethodCleaningTask _resolved_method_cleaning_task;
3544
3545 public:
3546 // The constructor is run in the VMThread.
3547 G1ParallelCleaningTask(BoolObjectClosure* is_alive, uint num_workers, bool unloading_occurred) :
3548 AbstractGangTask("Parallel Cleaning"),
3549 _string_task(is_alive, true, G1StringDedup::is_enabled()),
3550 _code_cache_task(num_workers, is_alive, unloading_occurred),
3551 _klass_cleaning_task(),
3552 _unloading_occurred(unloading_occurred),
3553 _resolved_method_cleaning_task() {
3554 }
3555
3556 // The parallel work done by all worker threads.
3557 void work(uint worker_id) {
3558 // Do first pass of code cache cleaning.
3559 _code_cache_task.work_first_pass(worker_id);
3560
3561 // Let the threads mark that the first pass is done.
3562 _code_cache_task.barrier_mark(worker_id);
3563
3564 // Clean the Strings.
3565 _string_task.work(worker_id);
3566
3567 // Clean unreferenced things in the ResolvedMethodTable
3568 _resolved_method_cleaning_task.work();
3569
3570 // Wait for all workers to finish the first code cache cleaning pass.
3571 _code_cache_task.barrier_wait(worker_id);
3572
3573 // Do the second code cache cleaning work, which realize on
3574 // the liveness information gathered during the first pass.
3575 _code_cache_task.work_second_pass(worker_id);
3576
3577 // Clean all klasses that were not unloaded.
3578 // The weak metadata in klass doesn't need to be
3579 // processed if there was no unloading.
3580 if (_unloading_occurred) {
3581 _klass_cleaning_task.work();
3582 }
3583 }
3584 };
3585
3586
3587 void G1CollectedHeap::complete_cleaning(BoolObjectClosure* is_alive,
3588 bool class_unloading_occurred) {
3589 uint n_workers = workers()->active_workers();
3590
3591 G1ParallelCleaningTask g1_unlink_task(is_alive, n_workers, class_unloading_occurred);
3592 workers()->run_task(&g1_unlink_task);
3593 }
3594
3595 void G1CollectedHeap::partial_cleaning(BoolObjectClosure* is_alive,
3596 bool process_strings,
3597 bool process_string_dedup) {
3598 if (!process_strings && !process_string_dedup) {
3599 // Nothing to clean.
3600 return;
3601 }
3602
3603 G1StringCleaningTask g1_unlink_task(is_alive, process_strings, process_string_dedup);
3604 workers()->run_task(&g1_unlink_task);
3605 }
3606
3607 class G1RedirtyLoggedCardsTask : public AbstractGangTask {
3608 private:
3609 DirtyCardQueueSet* _queue;
3610 G1CollectedHeap* _g1h;
3611 public:
3612 G1RedirtyLoggedCardsTask(DirtyCardQueueSet* queue, G1CollectedHeap* g1h) : AbstractGangTask("Redirty Cards"),
3613 _queue(queue), _g1h(g1h) { }
3614
3615 virtual void work(uint worker_id) {
3616 G1GCPhaseTimes* phase_times = _g1h->g1_policy()->phase_times();
3617 G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::RedirtyCards, worker_id);
3618
3619 RedirtyLoggedCardTableEntryClosure cl(_g1h);
3620 _queue->par_apply_closure_to_all_completed_buffers(&cl);
3621
3622 phase_times->record_thread_work_item(G1GCPhaseTimes::RedirtyCards, worker_id, cl.num_dirtied());
3623 }
3624 };
3978 phase_times->record_par_time(par_time_ms);
3979
3980 double code_root_fixup_time_ms =
3981 (os::elapsedTime() - end_par_time_sec) * 1000.0;
3982 phase_times->record_code_root_fixup_time(code_root_fixup_time_ms);
3983 }
3984
3985 void G1CollectedHeap::post_evacuate_collection_set(EvacuationInfo& evacuation_info, G1ParScanThreadStateSet* per_thread_states) {
3986 // Also cleans the card table from temporary duplicate detection information used
3987 // during UpdateRS/ScanRS.
3988 g1_rem_set()->cleanup_after_oops_into_collection_set_do();
3989
3990 // Process any discovered reference objects - we have
3991 // to do this _before_ we retire the GC alloc regions
3992 // as we may have to copy some 'reachable' referent
3993 // objects (and their reachable sub-graphs) that were
3994 // not copied during the pause.
3995 process_discovered_references(per_thread_states);
3996
3997 // FIXME
3998 // CM's reference processing also cleans up the string table.
3999 // Should we do that here also? We could, but it is a serial operation
4000 // and could significantly increase the pause time.
4001
4002 G1STWIsAliveClosure is_alive(this);
4003 G1KeepAliveClosure keep_alive(this);
4004
4005 {
4006 double start = os::elapsedTime();
4007
4008 WeakProcessor::weak_oops_do(&is_alive, &keep_alive);
4009
4010 double time_ms = (os::elapsedTime() - start) * 1000.0;
4011 g1_policy()->phase_times()->record_weak_ref_proc_time(time_ms);
4012 }
4013
4014 if (G1StringDedup::is_enabled()) {
4015 double fixup_start = os::elapsedTime();
4016
4017 G1StringDedup::unlink_or_oops_do(&is_alive, &keep_alive, true, g1_policy()->phase_times());
4018
|