40 #include "memory/genOopClosures.inline.hpp"
41 #include "memory/generation.hpp"
42 #include "memory/referencePolicy.hpp"
43 #include "memory/resourceArea.hpp"
44 #include "memory/sharedHeap.hpp"
45 #include "memory/space.hpp"
46 #include "oops/objArrayOop.hpp"
47 #include "oops/oop.inline.hpp"
48 #include "oops/oop.pcgc.inline.hpp"
49 #include "runtime/atomic.inline.hpp"
50 #include "runtime/handles.hpp"
51 #include "runtime/handles.inline.hpp"
52 #include "runtime/java.hpp"
53 #include "runtime/thread.inline.hpp"
54 #include "utilities/copy.hpp"
55 #include "utilities/globalDefinitions.hpp"
56 #include "utilities/workgroup.hpp"
57
58 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
59
#ifdef _MSC_VER
#pragma warning( push )
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif
// Per-worker-thread working state for a parallel young-generation scavenge.
// Binds this thread's task queue, its (optional) slot in the shared overflow
// stack array, its to-space PLAB, and the full set of scan/evacuation
// closures to the generations being collected.
ParScanThreadState::ParScanThreadState(Space* to_space_,
                                       ParNewGeneration* gen_,
                                       Generation* old_gen_,
                                       int thread_num_,
                                       ObjToScanQueueSet* work_queue_set_,
                                       Stack<oop, mtGC>* overflow_stacks_,
                                       size_t desired_plab_sz_,
                                       ParallelTaskTerminator& term_) :
  _to_space(to_space_), _old_gen(old_gen_), _young_gen(gen_), _thread_num(thread_num_),
  _work_queue(work_queue_set_->queue(thread_num_)), _to_space_full(false),
  // Each thread indexes its own stack in the shared overflow-stack array,
  // if local overflow stacks are in use at all.
  _overflow_stack(overflow_stacks_ ? overflow_stacks_ + thread_num_ : NULL),
  _ageTable(false), // false ==> not the global age table, no perf data.
  _to_space_alloc_buffer(desired_plab_sz_),
  // The scan closures capture 'this' here, hence the C4355 suppression above.
  _to_space_closure(gen_, this), _old_gen_closure(gen_, this),
  _to_space_root_closure(gen_, this), _old_gen_root_closure(gen_, this),
  _older_gen_closure(gen_, this),
  _evacuate_followers(this, &_to_space_closure, &_old_gen_closure,
                      &_to_space_root_closure, gen_, &_old_gen_root_closure,
                      work_queue_set_, &term_),
  _is_alive_closure(gen_), _scan_weak_ref_closure(gen_, this),
  _keep_alive_closure(&_scan_weak_ref_closure),
  _strong_roots_time(0.0), _term_time(0.0)
{
#if TASKQUEUE_STATS
  _term_attempts = 0;
  _overflow_refills = 0;
  _overflow_refill_objs = 0;
#endif // TASKQUEUE_STATS

  // The old gen may hand out a per-thread recorder for survivor PLAB
  // samples; NULL means no recording is wanted (see record_survivor_plab).
  _survivor_chunk_array =
    (ChunkArray*) old_gen()->get_data_recorder(thread_num());
  _hash_seed = 17; // Might want to take time-based random value.
  _start = os::elapsedTime();
  // The closures were built against gen_; point the old-gen ones at the
  // actual old generation.
  _old_gen_closure.set_generation(old_gen_);
  _old_gen_root_closure.set_generation(old_gen_);
}
#ifdef _MSC_VER
#pragma warning( pop )
#endif
103
104 void ParScanThreadState::record_survivor_plab(HeapWord* plab_start,
105 size_t plab_word_size) {
106 ChunkArray* sca = survivor_chunk_array();
107 if (sca != NULL) {
108 // A non-null SCA implies that we want the PLAB data recorded.
109 sca->record_sample(plab_start, plab_word_size);
110 }
111 }
112
113 bool ParScanThreadState::should_be_partially_scanned(oop new_obj, oop old_obj) const {
114 return new_obj->is_objArray() &&
115 arrayOop(new_obj)->length() > ParGCArrayScanChunk &&
116 new_obj != old_obj;
117 }
118
119 void ParScanThreadState::scan_partial_array_and_push_remainder(oop old) {
120 assert(old->is_objArray(), "must be obj array");
121 assert(old->is_forwarded(), "must be forwarded");
122 assert(Universe::heap()->is_in_reserved(old), "must be in heap.");
// Out-of-line virtual do_oop() entry points for the four scan-closure
// variants.  Each forwards to the shared ParScanClosure::do_oop_work(p,
// gc_barrier, root_scan); the two booleans mirror the WithBarrier/Root
// parts of each closure's name.
void ParScanWithBarrierClosure::do_oop(oop* p) { ParScanClosure::do_oop_work(p, true, false); }
void ParScanWithBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, true, false); }

void ParScanWithoutBarrierClosure::do_oop(oop* p) { ParScanClosure::do_oop_work(p, false, false); }
void ParScanWithoutBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, false, false); }

void ParRootScanWithBarrierTwoGensClosure::do_oop(oop* p) { ParScanClosure::do_oop_work(p, true, true); }
void ParRootScanWithBarrierTwoGensClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, true, true); }

void ParRootScanWithoutBarrierClosure::do_oop(oop* p) { ParScanClosure::do_oop_work(p, false, true); }
void ParRootScanWithoutBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, false, true); }
510
// Closure applied to oops discovered through weak references during the
// parallel young-gen collection; carries the per-thread scan state.
ParScanWeakRefClosure::ParScanWeakRefClosure(ParNewGeneration* g,
                                             ParScanThreadState* par_scan_state)
  : ScanWeakRefClosure(g), _par_scan_state(par_scan_state)
{}

// Virtual do_oop() entry points forward to the non-virtual do_oop_work().
void ParScanWeakRefClosure::do_oop(oop* p) { ParScanWeakRefClosure::do_oop_work(p); }
void ParScanWeakRefClosure::do_oop(narrowOop* p) { ParScanWeakRefClosure::do_oop_work(p); }
518
519 #ifdef WIN32
520 #pragma warning(disable: 4786) /* identifier was truncated to '255' characters in the browser information */
521 #endif
522
// Closure that evacuates "followers" (objects reachable from already-copied
// objects) by draining the per-thread task queues.  It is constructed once
// per worker in ParScanThreadState and simply captures the thread state,
// the scan closures to apply, the queue set, and the terminator used for
// the parallel termination protocol.
ParEvacuateFollowersClosure::ParEvacuateFollowersClosure(
    ParScanThreadState* par_scan_state_,
    ParScanWithoutBarrierClosure* to_space_closure_,
    ParScanWithBarrierClosure* old_gen_closure_,
    ParRootScanWithoutBarrierClosure* to_space_root_closure_,
    ParNewGeneration* par_gen_,
    ParRootScanWithBarrierTwoGensClosure* old_gen_root_closure_,
    ObjToScanQueueSet* task_queues_,
    ParallelTaskTerminator* terminator_) :

    _par_scan_state(par_scan_state_),
    _to_space_closure(to_space_closure_),
    _old_gen_closure(old_gen_closure_),
    _to_space_root_closure(to_space_root_closure_),
    _old_gen_root_closure(old_gen_root_closure_),
    _par_gen(par_gen_),
    _task_queues(task_queues_),
    _terminator(terminator_)
{}
542
617 &par_scan_state.to_space_root_closure(),
618 false);
619
620 par_scan_state.start_strong_roots();
621 gch->gen_process_roots(_gen->level(),
622 true, // Process younger gens, if any,
623 // as strong roots.
624 false, // no scope; this is parallel code
625 SharedHeap::SO_ScavengeCodeCache,
626 GenCollectedHeap::StrongAndWeakRoots,
627 &par_scan_state.to_space_root_closure(),
628 &par_scan_state.older_gen_closure(),
629 &cld_scan_closure);
630
631 par_scan_state.end_strong_roots();
632
633 // "evacuate followers".
634 par_scan_state.evacuate_followers_closure().do_void();
635 }
636
#ifdef _MSC_VER
#pragma warning( push )
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif
// Construct the parallel young ("ParNew", copy policy "PCopy") generation:
// one object-scan task queue and, if ParGCUseLocalOverflow, one overflow
// stack per GC worker thread.
ParNewGeneration::
ParNewGeneration(ReservedSpace rs, size_t initial_byte_size, int level)
  : DefNewGeneration(rs, initial_byte_size, level, "PCopy"),
  _overflow_list(NULL),
  _is_alive_closure(this),
  _plab_stats(YoungPLABSize, PLABWeight)
{
  NOT_PRODUCT(_overflow_counter = ParGCWorkQueueOverflowInterval;)
  NOT_PRODUCT(_num_par_pushes = 0;)
  _task_queues = new ObjToScanQueueSet(ParallelGCThreads);
  guarantee(_task_queues != NULL, "task_queues allocation failure.");

  // Create and register one work queue per GC worker thread...
  for (uint i1 = 0; i1 < ParallelGCThreads; i1++) {
    ObjToScanQueue *q = new ObjToScanQueue();
    guarantee(q != NULL, "work_queue Allocation failure.");
    _task_queues->register_queue(i1, q);
  }

  // ...then initialize them all.
  for (uint i2 = 0; i2 < ParallelGCThreads; i2++)
    _task_queues->queue(i2)->initialize();

  _overflow_stacks = NULL;
  if (ParGCUseLocalOverflow) {

    // typedef to workaround NEW_C_HEAP_ARRAY macro, which can not deal
    // with ','
    typedef Stack<oop, mtGC> GCOopStack;

    // Raw C-heap array; placement-new constructs each per-thread stack.
    _overflow_stacks = NEW_C_HEAP_ARRAY(GCOopStack, ParallelGCThreads, mtGC);
    for (size_t i = 0; i < ParallelGCThreads; ++i) {
      new (_overflow_stacks + i) Stack<oop, mtGC>();
    }
  }

  if (UsePerfData) {
    EXCEPTION_MARK;
    ResourceMark rm;

    // Publish the GC worker-thread count as a constant perf counter.
    const char* cname =
      PerfDataManager::counter_name(_gen_counters->name_space(), "threads");
    PerfDataManager::create_constant(SUN_GC, cname, PerfData::U_None,
                                     ParallelGCThreads, CHECK);
  }
}
#ifdef _MSC_VER
#pragma warning( pop )
#endif
688
// ParNewGeneration::
// Keep-alive closure used during reference processing; delegates the actual
// oop work to the parallel weak-ref scan closure it wraps.
ParKeepAliveClosure::ParKeepAliveClosure(ParScanWeakRefClosure* cl) :
  DefNewGeneration::KeepAliveClosure(cl), _par_cl(cl) {}
692
693 template <class T>
694 void /*ParNewGeneration::*/ParKeepAliveClosure::do_oop_work(T* p) {
695 #ifdef ASSERT
696 {
697 assert(!oopDesc::is_null(*p), "expected non-null ref");
698 oop obj = oopDesc::load_decode_heap_oop_not_null(p);
699 // We never expect to see a null reference being processed
700 // as a weak reference.
701 assert(obj->is_oop(), "expected an oop while scanning weak refs");
702 }
703 #endif // ASSERT
704
705 _par_cl->do_oop_nv(p);
706
707 if (Universe::heap()->is_in_reserved(p)) {
|
40 #include "memory/genOopClosures.inline.hpp"
41 #include "memory/generation.hpp"
42 #include "memory/referencePolicy.hpp"
43 #include "memory/resourceArea.hpp"
44 #include "memory/sharedHeap.hpp"
45 #include "memory/space.hpp"
46 #include "oops/objArrayOop.hpp"
47 #include "oops/oop.inline.hpp"
48 #include "oops/oop.pcgc.inline.hpp"
49 #include "runtime/atomic.inline.hpp"
50 #include "runtime/handles.hpp"
51 #include "runtime/handles.inline.hpp"
52 #include "runtime/java.hpp"
53 #include "runtime/thread.inline.hpp"
54 #include "utilities/copy.hpp"
55 #include "utilities/globalDefinitions.hpp"
56 #include "utilities/workgroup.hpp"
57
58 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
59
60 ParScanThreadState::ParScanThreadState(Space* to_space_,
61 ParNewGeneration* gen_,
62 Generation* old_gen_,
63 int thread_num_,
64 ObjToScanQueueSet* work_queue_set_,
65 Stack<oop, mtGC>* overflow_stacks_,
66 size_t desired_plab_sz_,
67 ParallelTaskTerminator& term_) :
68 _to_space(to_space_), _old_gen(old_gen_), _young_gen(gen_), _thread_num(thread_num_),
69 _work_queue(work_queue_set_->queue(thread_num_)), _to_space_full(false),
70 _overflow_stack(overflow_stacks_ ? overflow_stacks_ + thread_num_ : NULL),
71 _ageTable(false), // false ==> not the global age table, no perf data.
72 _to_space_alloc_buffer(desired_plab_sz_),
73 _is_alive_closure(gen_),
74 _keep_alive_closure(&_scan_weak_ref_closure),
75 _strong_roots_time(0.0), _term_time(0.0)
76 {
77 _to_space_closure = ParScanWithoutBarrierClosure(gen_, this);
78 _old_gen_closure = ParScanWithBarrierClosure(gen_, this);
79 _to_space_root_closure = ParRootScanWithoutBarrierClosure(gen_, this);
80 _old_gen_root_closure = ParRootScanWithBarrierTwoGensClosure(gen_, this);
81 _older_gen_closure = ParRootScanWithBarrierTwoGensClosure(gen_, this);
82 _scan_weak_ref_closure = ParScanWeakRefClosure(gen_, this);
83
84 _evacuate_followers = ParEvacuateFollowersClosure(this,
85 &_to_space_closure,
86 &_old_gen_closure,
87 &_to_space_root_closure,
88 gen_,
89 &_old_gen_root_closure,
90 work_queue_set_,
91 &term_);
92
93
94 #if TASKQUEUE_STATS
95 _term_attempts = 0;
96 _overflow_refills = 0;
97 _overflow_refill_objs = 0;
98 #endif // TASKQUEUE_STATS
99
100 _survivor_chunk_array =
101 (ChunkArray*) old_gen()->get_data_recorder(thread_num());
102 _hash_seed = 17; // Might want to take time-based random value.
103 _start = os::elapsedTime();
104 _old_gen_closure.set_generation(old_gen_);
105 _old_gen_root_closure.set_generation(old_gen_);
106 }
107
108 void ParScanThreadState::record_survivor_plab(HeapWord* plab_start,
109 size_t plab_word_size) {
110 ChunkArray* sca = survivor_chunk_array();
111 if (sca != NULL) {
112 // A non-null SCA implies that we want the PLAB data recorded.
113 sca->record_sample(plab_start, plab_word_size);
114 }
115 }
116
117 bool ParScanThreadState::should_be_partially_scanned(oop new_obj, oop old_obj) const {
118 return new_obj->is_objArray() &&
119 arrayOop(new_obj)->length() > ParGCArrayScanChunk &&
120 new_obj != old_obj;
121 }
122
123 void ParScanThreadState::scan_partial_array_and_push_remainder(oop old) {
124 assert(old->is_objArray(), "must be obj array");
125 assert(old->is_forwarded(), "must be forwarded");
126 assert(Universe::heap()->is_in_reserved(old), "must be in heap.");
// Out-of-line virtual do_oop() entry points for the four scan-closure
// variants.  Each forwards to the shared ParScanClosure::do_oop_work(p,
// gc_barrier, root_scan); the two booleans mirror the WithBarrier/Root
// parts of each closure's name.
void ParScanWithBarrierClosure::do_oop(oop* p) { ParScanClosure::do_oop_work(p, true, false); }
void ParScanWithBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, true, false); }

void ParScanWithoutBarrierClosure::do_oop(oop* p) { ParScanClosure::do_oop_work(p, false, false); }
void ParScanWithoutBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, false, false); }

void ParRootScanWithBarrierTwoGensClosure::do_oop(oop* p) { ParScanClosure::do_oop_work(p, true, true); }
void ParRootScanWithBarrierTwoGensClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, true, true); }

void ParRootScanWithoutBarrierClosure::do_oop(oop* p) { ParScanClosure::do_oop_work(p, false, true); }
void ParRootScanWithoutBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, false, true); }
514
// Closure applied to oops discovered through weak references during the
// parallel young-gen collection; carries the per-thread scan state.
ParScanWeakRefClosure::ParScanWeakRefClosure(ParNewGeneration* g,
                                             ParScanThreadState* par_scan_state)
  : ScanWeakRefClosure(g), _par_scan_state(par_scan_state)
{}

// Virtual do_oop() entry points forward to the non-virtual do_oop_work().
void ParScanWeakRefClosure::do_oop(oop* p) { ParScanWeakRefClosure::do_oop_work(p); }
void ParScanWeakRefClosure::do_oop(narrowOop* p) { ParScanWeakRefClosure::do_oop_work(p); }
522
// Closure that evacuates "followers" (objects reachable from already-copied
// objects) by draining the per-thread task queues.  It is constructed once
// per worker in ParScanThreadState and simply captures the thread state,
// the scan closures to apply, the queue set, and the terminator used for
// the parallel termination protocol.
ParEvacuateFollowersClosure::ParEvacuateFollowersClosure(
    ParScanThreadState* par_scan_state_,
    ParScanWithoutBarrierClosure* to_space_closure_,
    ParScanWithBarrierClosure* old_gen_closure_,
    ParRootScanWithoutBarrierClosure* to_space_root_closure_,
    ParNewGeneration* par_gen_,
    ParRootScanWithBarrierTwoGensClosure* old_gen_root_closure_,
    ObjToScanQueueSet* task_queues_,
    ParallelTaskTerminator* terminator_) :

    _par_scan_state(par_scan_state_),
    _to_space_closure(to_space_closure_),
    _old_gen_closure(old_gen_closure_),
    _to_space_root_closure(to_space_root_closure_),
    _old_gen_root_closure(old_gen_root_closure_),
    _par_gen(par_gen_),
    _task_queues(task_queues_),
    _terminator(terminator_)
{}
542
617 &par_scan_state.to_space_root_closure(),
618 false);
619
620 par_scan_state.start_strong_roots();
621 gch->gen_process_roots(_gen->level(),
622 true, // Process younger gens, if any,
623 // as strong roots.
624 false, // no scope; this is parallel code
625 SharedHeap::SO_ScavengeCodeCache,
626 GenCollectedHeap::StrongAndWeakRoots,
627 &par_scan_state.to_space_root_closure(),
628 &par_scan_state.older_gen_closure(),
629 &cld_scan_closure);
630
631 par_scan_state.end_strong_roots();
632
633 // "evacuate followers".
634 par_scan_state.evacuate_followers_closure().do_void();
635 }
636
637 ParNewGeneration::
638 ParNewGeneration(ReservedSpace rs, size_t initial_byte_size, int level)
639 : DefNewGeneration(rs, initial_byte_size, level, "PCopy"),
640 _overflow_list(NULL),
641 _plab_stats(YoungPLABSize, PLABWeight)
642 {
643 _is_alive_closure = DefNewGeneration::IsAliveClosure(this);
644 NOT_PRODUCT(_overflow_counter = ParGCWorkQueueOverflowInterval;)
645 NOT_PRODUCT(_num_par_pushes = 0;)
646 _task_queues = new ObjToScanQueueSet(ParallelGCThreads);
647 guarantee(_task_queues != NULL, "task_queues allocation failure.");
648
649 for (uint i1 = 0; i1 < ParallelGCThreads; i1++) {
650 ObjToScanQueue *q = new ObjToScanQueue();
651 guarantee(q != NULL, "work_queue Allocation failure.");
652 _task_queues->register_queue(i1, q);
653 }
654
655 for (uint i2 = 0; i2 < ParallelGCThreads; i2++)
656 _task_queues->queue(i2)->initialize();
657
658 _overflow_stacks = NULL;
659 if (ParGCUseLocalOverflow) {
660
661 // typedef to workaround NEW_C_HEAP_ARRAY macro, which can not deal
662 // with ','
663 typedef Stack<oop, mtGC> GCOopStack;
664
665 _overflow_stacks = NEW_C_HEAP_ARRAY(GCOopStack, ParallelGCThreads, mtGC);
666 for (size_t i = 0; i < ParallelGCThreads; ++i) {
667 new (_overflow_stacks + i) Stack<oop, mtGC>();
668 }
669 }
670
671 if (UsePerfData) {
672 EXCEPTION_MARK;
673 ResourceMark rm;
674
675 const char* cname =
676 PerfDataManager::counter_name(_gen_counters->name_space(), "threads");
677 PerfDataManager::create_constant(SUN_GC, cname, PerfData::U_None,
678 ParallelGCThreads, CHECK);
679 }
680 }
681
// ParNewGeneration::
// Keep-alive closure used during reference processing; delegates the actual
// oop work to the parallel weak-ref scan closure it wraps.
ParKeepAliveClosure::ParKeepAliveClosure(ParScanWeakRefClosure* cl) :
  DefNewGeneration::KeepAliveClosure(cl), _par_cl(cl) {}
685
686 template <class T>
687 void /*ParNewGeneration::*/ParKeepAliveClosure::do_oop_work(T* p) {
688 #ifdef ASSERT
689 {
690 assert(!oopDesc::is_null(*p), "expected non-null ref");
691 oop obj = oopDesc::load_decode_heap_oop_not_null(p);
692 // We never expect to see a null reference being processed
693 // as a weak reference.
694 assert(obj->is_oop(), "expected an oop while scanning weak refs");
695 }
696 #endif // ASSERT
697
698 _par_cl->do_oop_nv(p);
699
700 if (Universe::heap()->is_in_reserved(p)) {
|