32 #include "gc/parallel/psParallelCompact.inline.hpp"
33 #include "gc/shared/taskqueue.inline.hpp"
34 #include "logging/log.hpp"
35 #include "memory/iterator.inline.hpp"
36 #include "oops/access.inline.hpp"
37 #include "oops/compressedOops.inline.hpp"
38 #include "oops/instanceKlass.inline.hpp"
39 #include "oops/instanceMirrorKlass.inline.hpp"
40 #include "oops/objArrayKlass.inline.hpp"
41 #include "oops/oop.inline.hpp"
42
// Static state shared by every ParCompactionManager instance;
// populated by initialize() and by the constructor.
PSOldGen* ParCompactionManager::_old_gen = NULL;
// One entry per GC worker thread, plus one for the VMThread (see initialize()).
ParCompactionManager** ParCompactionManager::_manager_array = NULL;

// Work-stealing queue sets: one queue per worker is registered in initialize().
OopTaskQueueSet* ParCompactionManager::_stack_array = NULL;
ParCompactionManager::ObjArrayTaskQueueSet*
ParCompactionManager::_objarray_queues = NULL;
ObjectStartArray* ParCompactionManager::_start_array = NULL;
ParMarkBitMap* ParCompactionManager::_mark_bitmap = NULL;
RegionTaskQueueSet* ParCompactionManager::_region_array = NULL;
52
53 ParCompactionManager::ParCompactionManager() :
54 _action(CopyAndUpdate) {
55
56 ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
57
58 _old_gen = heap->old_gen();
59 _start_array = old_gen()->start_array();
60
61 marking_stack()->initialize();
62 _objarray_stack.initialize();
63 _region_stack.initialize();
64
65 reset_bitmap_query_cache();
66 }
67
// One-time setup: creates one ParCompactionManager per GC worker
// thread plus a dedicated one for the VMThread, and registers each
// worker's queues with the shared queue sets so idle workers can steal.
// NOTE(review): the allocation of _manager_array/_stack_array/
// _objarray_queues and the declaration of parallel_gc_threads occur in
// a portion of this function not visible in this chunk; mbm is
// presumably stored into _mark_bitmap there -- confirm against the
// full file.
void ParCompactionManager::initialize(ParMarkBitMap* mbm) {
  assert(ParallelScavengeHeap::heap() != NULL,
    "Needed for initialization");

  guarantee(_objarray_queues != NULL, "Could not allocate objarray_queues");
  _region_array = new RegionTaskQueueSet(parallel_gc_threads);
  guarantee(_region_array != NULL, "Could not allocate region_array");

  // Create and register the ParCompactionManager(s) for the worker threads.
  for(uint i=0; i<parallel_gc_threads; i++) {
    _manager_array[i] = new ParCompactionManager();
    guarantee(_manager_array[i] != NULL, "Could not create ParCompactionManager");
    // Registration makes each queue visible to the queue set for stealing.
    stack_array()->register_queue(i, _manager_array[i]->marking_stack());
    _objarray_queues->register_queue(i, &_manager_array[i]->_objarray_stack);
    region_array()->register_queue(i, _manager_array[i]->region_stack());
  }

  // The VMThread gets its own ParCompactionManager, which is not available
  // for work stealing.
  _manager_array[parallel_gc_threads] = new ParCompactionManager();
  guarantee(_manager_array[parallel_gc_threads] != NULL,
    "Could not create ParCompactionManager");
  assert(ParallelScavengeHeap::heap()->workers().total_workers() != 0,
    "Not initialized?");
}
103
104 void ParCompactionManager::reset_all_bitmap_query_caches() {
105 uint parallel_gc_threads = ParallelScavengeHeap::heap()->workers().total_workers();
106 for (uint i=0; i<=parallel_gc_threads; i++) {
107 _manager_array[i]->reset_bitmap_query_cache();
108 }
109 }
110
111 bool ParCompactionManager::should_update() {
112 assert(action() != NotValid, "Action is not set");
113 return (action() == ParCompactionManager::Update) ||
114 (action() == ParCompactionManager::CopyAndUpdate) ||
115 (action() == ParCompactionManager::UpdateAndCopy);
116 }
117
118 bool ParCompactionManager::should_copy() {
119 assert(action() != NotValid, "Action is not set");
120 return (action() == ParCompactionManager::Copy) ||
121 (action() == ParCompactionManager::CopyAndUpdate) ||
146 follow_array((objArrayOop)task.obj(), task.index());
147 }
148 } while (!marking_stacks_empty());
149
150 assert(marking_stacks_empty(), "Sanity");
151 }
152
153 void ParCompactionManager::drain_region_stacks() {
154 do {
155 // Drain overflow stack first so other threads can steal.
156 size_t region_index;
157 while (region_stack()->pop_overflow(region_index)) {
158 PSParallelCompact::fill_and_update_region(this, region_index);
159 }
160
161 while (region_stack()->pop_local(region_index)) {
162 PSParallelCompact::fill_and_update_region(this, region_index);
163 }
164 } while (!region_stack()->is_empty());
165 }
|
32 #include "gc/parallel/psParallelCompact.inline.hpp"
33 #include "gc/shared/taskqueue.inline.hpp"
34 #include "logging/log.hpp"
35 #include "memory/iterator.inline.hpp"
36 #include "oops/access.inline.hpp"
37 #include "oops/compressedOops.inline.hpp"
38 #include "oops/instanceKlass.inline.hpp"
39 #include "oops/instanceMirrorKlass.inline.hpp"
40 #include "oops/objArrayKlass.inline.hpp"
41 #include "oops/oop.inline.hpp"
42
// Static state shared by every ParCompactionManager instance;
// populated by initialize() and by the constructor.
PSOldGen* ParCompactionManager::_old_gen = NULL;
// One entry per GC worker thread, plus one for the VMThread (see initialize()).
ParCompactionManager** ParCompactionManager::_manager_array = NULL;

// Work-stealing queue sets: one queue per worker is registered in initialize().
OopTaskQueueSet* ParCompactionManager::_stack_array = NULL;
ParCompactionManager::ObjArrayTaskQueueSet*
ParCompactionManager::_objarray_queues = NULL;
ObjectStartArray* ParCompactionManager::_start_array = NULL;
ParMarkBitMap* ParCompactionManager::_mark_bitmap = NULL;
RegionTaskQueueSet* ParCompactionManager::_region_array = NULL;
// Free list of shadow regions and the monitor guarding it
// (see the *_shadow_region* methods below).
GrowableArray<size_t >* ParCompactionManager::_shadow_region_array = NULL;
Monitor* ParCompactionManager::_shadow_region_monitor = NULL;
54
55 ParCompactionManager::ParCompactionManager() :
56 _action(CopyAndUpdate) {
57
58 ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
59
60 _old_gen = heap->old_gen();
61 _start_array = old_gen()->start_array();
62
63 marking_stack()->initialize();
64 _objarray_stack.initialize();
65 _region_stack.initialize();
66
67 reset_bitmap_query_cache();
68 }
69
// One-time setup: creates one ParCompactionManager per GC worker
// thread plus a dedicated one for the VMThread, registers each
// worker's queues with the shared queue sets for work stealing, and
// sets up the shadow-region free list and its monitor.
// NOTE(review): the allocation of _manager_array/_stack_array/
// _objarray_queues and the declaration of parallel_gc_threads occur in
// a portion of this function not visible in this chunk; mbm is
// presumably stored into _mark_bitmap there -- confirm against the
// full file.
void ParCompactionManager::initialize(ParMarkBitMap* mbm) {
  assert(ParallelScavengeHeap::heap() != NULL,
    "Needed for initialization");

  guarantee(_objarray_queues != NULL, "Could not allocate objarray_queues");
  _region_array = new RegionTaskQueueSet(parallel_gc_threads);
  guarantee(_region_array != NULL, "Could not allocate region_array");

  // Create and register the ParCompactionManager(s) for the worker threads.
  for(uint i=0; i<parallel_gc_threads; i++) {
    _manager_array[i] = new ParCompactionManager();
    guarantee(_manager_array[i] != NULL, "Could not create ParCompactionManager");
    // Registration makes each queue visible to the queue set for stealing.
    stack_array()->register_queue(i, _manager_array[i]->marking_stack());
    _objarray_queues->register_queue(i, &_manager_array[i]->_objarray_stack);
    region_array()->register_queue(i, _manager_array[i]->region_stack());
  }

  // The VMThread gets its own ParCompactionManager, which is not available
  // for work stealing.
  _manager_array[parallel_gc_threads] = new ParCompactionManager();
  guarantee(_manager_array[parallel_gc_threads] != NULL,
    "Could not create ParCompactionManager");
  assert(ParallelScavengeHeap::heap()->workers().total_workers() != 0,
    "Not initialized?");

  // C-heap allocated free list of shadow regions, initially empty.
  _shadow_region_array = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<size_t >(10, true);

  // Monitor used by pop_shadow_region_mt_safe()/push_shadow_region_mt_safe();
  // never checks for safepoints while waiting.
  _shadow_region_monitor = new Monitor(Mutex::barrier, "CompactionManager monitor",
    Mutex::_allow_vm_block_flag, Monitor::_safepoint_check_never);
}
110
111 void ParCompactionManager::reset_all_bitmap_query_caches() {
112 uint parallel_gc_threads = ParallelScavengeHeap::heap()->workers().total_workers();
113 for (uint i=0; i<=parallel_gc_threads; i++) {
114 _manager_array[i]->reset_bitmap_query_cache();
115 }
116 }
117
118 bool ParCompactionManager::should_update() {
119 assert(action() != NotValid, "Action is not set");
120 return (action() == ParCompactionManager::Update) ||
121 (action() == ParCompactionManager::CopyAndUpdate) ||
122 (action() == ParCompactionManager::UpdateAndCopy);
123 }
124
125 bool ParCompactionManager::should_copy() {
126 assert(action() != NotValid, "Action is not set");
127 return (action() == ParCompactionManager::Copy) ||
128 (action() == ParCompactionManager::CopyAndUpdate) ||
153 follow_array((objArrayOop)task.obj(), task.index());
154 }
155 } while (!marking_stacks_empty());
156
157 assert(marking_stacks_empty(), "Sanity");
158 }
159
160 void ParCompactionManager::drain_region_stacks() {
161 do {
162 // Drain overflow stack first so other threads can steal.
163 size_t region_index;
164 while (region_stack()->pop_overflow(region_index)) {
165 PSParallelCompact::fill_and_update_region(this, region_index);
166 }
167
168 while (region_stack()->pop_local(region_index)) {
169 PSParallelCompact::fill_and_update_region(this, region_index);
170 }
171 } while (!region_stack()->is_empty());
172 }
173
// Pops a free shadow region from the shared free list, blocking until
// one becomes available or until the heap region described by
// region_ptr is claimed. Returns the shadow region index, or
// InvalidShadow when the heap region was claimed (so no shadow region
// is needed). Thread-safe: all accesses happen under
// _shadow_region_monitor; waiters are woken by
// push_shadow_region_mt_safe()'s notify(), and also time out every
// 1 ms to re-test the claimed() condition.
size_t ParCompactionManager::pop_shadow_region_mt_safe(PSParallelCompact::RegionData* region_ptr) {
  MonitorLocker ml(_shadow_region_monitor, Mutex::_no_safepoint_check_flag);
  while (true) {
    if (!_shadow_region_array->is_empty()) {
      return _shadow_region_array->pop();
    }
    // Check if the corresponding heap region is available now.
    // If so, we don't need to get a shadow region anymore, and
    // we return InvalidShadow to indicate such a case.
    if (region_ptr->claimed()) {
      return InvalidShadow;
    }
    // Bounded wait (1 ms) so the claimed() check above is re-run even
    // if no push ever notifies us.
    ml.wait(1);
  }
}
189
// Returns a shadow region to the shared free list and wakes one
// waiter in pop_shadow_region_mt_safe(). The push must happen under
// the monitor, before the notify, so the woken waiter observes it.
void ParCompactionManager::push_shadow_region_mt_safe(size_t shadow_region) {
  MonitorLocker ml(_shadow_region_monitor, Mutex::_no_safepoint_check_flag);
  _shadow_region_array->push(shadow_region);
  ml.notify();
}
195
// Unsynchronized variant of push_shadow_region_mt_safe(): adds a
// shadow region to the free list without taking the monitor.
// NOTE(review): only safe when no concurrent access to
// _shadow_region_array is possible -- verify at callers.
void ParCompactionManager::push_shadow_region(size_t shadow_region) {
  _shadow_region_array->push(shadow_region);
}
199
// Empties the shadow region free list. No locking is done here;
// presumably called only when no workers are using shadow regions --
// verify at callers.
void ParCompactionManager::remove_all_shadow_regions() {
  _shadow_region_array->clear();
}
|