// ...
}

void G1CMBitMapMappingChangedListener::on_commit(uint start_region, size_t num_regions, bool zero_filled) {
  if (zero_filled) {
    return;
  }
  // We need to clear the bitmap on commit, removing any existing information.
  MemRegion mr(G1CollectedHeap::heap()->bottom_addr_for_region(start_region), num_regions * HeapRegion::GrainWords);
  _bm->clear_range(mr);
}

void G1CMBitMap::clear_range(MemRegion mr) {
  mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
  assert(!mr.is_empty(), "unexpected empty region");
  // Convert the address range into an offset range.
  _bm.at_put_range(heapWordToOffset(mr.start()),
                   heapWordToOffset(mr.end()), false);
}

G1CMMarkStack::G1CMMarkStack() :
  _max_chunk_capacity(0),
  _base(NULL),
  _chunk_capacity(0),
  _out_of_memory(false),
  _should_expand(false) {
  set_empty();
}

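// Replace the backing store with a fresh mapping of new_capacity chunks and
// release the previous one. The caller must ensure the stack is empty.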
bool G1CMMarkStack::resize(size_t new_capacity) {
  assert(is_empty(), "Only resize when stack is empty.");
  assert(new_capacity <= _max_chunk_capacity,
         "Trying to resize stack to " SIZE_FORMAT " chunks when the maximum is " SIZE_FORMAT, new_capacity, _max_chunk_capacity);

  OopChunk* new_base = MmapArrayAllocator<OopChunk, mtGC>::allocate_or_null(new_capacity);

  if (new_base == NULL) {
    log_warning(gc)("Failed to reserve memory for new overflow mark stack with " SIZE_FORMAT " chunks and size " SIZE_FORMAT "B.", new_capacity, new_capacity * sizeof(OopChunk));
    return false;
  }
  // Release old mapping.
  if (_base != NULL) {
    MmapArrayAllocator<OopChunk, mtGC>::free(_base, _chunk_capacity);
  }

  _base = new_base;
  _chunk_capacity = new_capacity;
  set_empty();
  _should_expand = false;

  return true;
}

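// Capacities are expressed in void*-sized elements. The alignment below is
// the smallest element count that corresponds both to a whole number of
// OopChunks and to a whole multiple of the OS allocation granularity.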
size_t G1CMMarkStack::capacity_alignment() {
  return (size_t)lcm(os::vm_allocation_granularity(), sizeof(OopChunk)) / sizeof(void*);
}

bool G1CMMarkStack::initialize(size_t initial_capacity, size_t max_capacity) {
  guarantee(_max_chunk_capacity == 0, "G1CMMarkStack already initialized.");

  size_t const OopChunkSizeInVoidStar = sizeof(OopChunk) / sizeof(void*);

  _max_chunk_capacity = (size_t)align_size_up(max_capacity, capacity_alignment()) / OopChunkSizeInVoidStar;
  size_t initial_chunk_capacity = (size_t)align_size_up(initial_capacity, capacity_alignment()) / OopChunkSizeInVoidStar;

  guarantee(initial_chunk_capacity <= _max_chunk_capacity,
            "Maximum chunk capacity " SIZE_FORMAT " smaller than initial capacity " SIZE_FORMAT,
            _max_chunk_capacity,
            initial_chunk_capacity);

  log_debug(gc)("Initialize mark stack with " SIZE_FORMAT " chunks, maximum " SIZE_FORMAT,
                initial_chunk_capacity, _max_chunk_capacity);

  return resize(initial_chunk_capacity);
}

void G1CMMarkStack::expand() {
  // Clear the expansion flag.
  _should_expand = false;

  if (_chunk_capacity == _max_chunk_capacity) {
    log_debug(gc)("Cannot expand overflow mark stack further, already at maximum capacity of " SIZE_FORMAT " chunks.", _chunk_capacity);
    return;
  }
  size_t old_capacity = _chunk_capacity;
  // Double the capacity if possible.
  size_t new_capacity = MIN2(old_capacity * 2, _max_chunk_capacity);

  if (resize(new_capacity)) {
    log_debug(gc)("Expanded mark stack capacity from " SIZE_FORMAT " to " SIZE_FORMAT " chunks",
                  old_capacity, new_capacity);
  } else {
    log_warning(gc)("Failed to expand mark stack capacity from " SIZE_FORMAT " to " SIZE_FORMAT " chunks",
                    old_capacity, new_capacity);
  }
}

G1CMMarkStack::~G1CMMarkStack() {
  if (_base != NULL) {
    MmapArrayAllocator<OopChunk, mtGC>::free(_base, _chunk_capacity);
  }
}

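// The chunk list and the free list are plain singly-linked lists of chunks.
// Pushes and pops on either list are serialized by ParGCRareEvent_lock.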
void G1CMMarkStack::add_chunk_to_list(OopChunk* volatile* list, OopChunk* elem) {
  MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
  elem->next = *list;
  *list = elem;
}

G1CMMarkStack::OopChunk* G1CMMarkStack::remove_chunk_from_list(OopChunk* volatile* list) {
  MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);

  OopChunk* result = *list;
  if (result != NULL) {
    *list = (*list)->next;
  }
  return result;
}

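// Bump-pointer allocation from the backing array: claim the next free index
// with an atomic add and construct the chunk in place. Returns NULL once the
// backing memory is exhausted.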
G1CMMarkStack::OopChunk* G1CMMarkStack::allocate_new_chunk() {
  // This dirty read is okay because we only ever increase _hwm in parallel code.
  if (_hwm >= _chunk_capacity) {
    return NULL;
  }

  size_t cur_idx = Atomic::add(1, &_hwm) - 1;
  if (cur_idx >= _chunk_capacity) {
    return NULL;
  }

  OopChunk* result = ::new (&_base[cur_idx]) OopChunk;
  result->next = NULL;
  return result;
}

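// Push a buffer of OopsPerChunk entries onto the global stack: reuse a chunk
// from the free list if possible, otherwise allocate from backing memory, and
// flag out-of-memory if neither succeeds.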
void G1CMMarkStack::par_push_chunk(oop* ptr_arr) {
  // Get a new chunk.
  OopChunk* new_chunk = remove_chunk_from_list(&_free_list);

  if (new_chunk == NULL) {
    // Did not get a chunk from the free list. Allocate from backing memory.
    new_chunk = allocate_new_chunk();
  }

  if (new_chunk == NULL) {
    _out_of_memory = true;
    return;
  }

  for (size_t i = 0; i < OopsPerChunk; i++) {
    new_chunk->data[i] = ptr_arr[i];
  }

  add_chunk_to_list(&_chunk_list, new_chunk);
  Atomic::inc(&_chunks_in_chunk_list);
}

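// Pop one chunk from the global stack, copying its OopsPerChunk entries into
// ptr_arr and recycling the emptied chunk onto the free list. Returns false
// if the stack is empty.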
bool G1CMMarkStack::par_pop_chunk(oop* ptr_arr) {
  OopChunk* cur = remove_chunk_from_list(&_chunk_list);

  if (cur == NULL) {
    return false;
  }

  Atomic::dec(&_chunks_in_chunk_list);

  for (size_t i = 0; i < OopsPerChunk; i++) {
    ptr_arr[i] = (oop)cur->data[i];
  }

  add_chunk_to_list(&_free_list, cur);
  return true;
}

void G1CMMarkStack::set_empty() {
  _chunks_in_chunk_list = 0;
  _hwm = 0;
  clear_out_of_memory();
  _chunk_list = NULL;
  _free_list = NULL;
}

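// G1CMRootRegions tracks the regions (currently only survivor regions) that
// are scanned at the start of the concurrent marking cycle; workers claim
// survivors one at a time via _claimed_survivor_index.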
G1CMRootRegions::G1CMRootRegions() :
  _survivors(NULL), _cm(NULL), _scan_in_progress(false),
  _should_abort(false), _claimed_survivor_index(0) { }

void G1CMRootRegions::init(const G1SurvivorRegions* survivors, G1ConcurrentMark* cm) {
  _survivors = survivors;
  _cm = cm;
}

void G1CMRootRegions::prepare_for_scan() {
  assert(!scan_in_progress(), "pre-condition");

  // Currently, only survivors can be root regions.
  _claimed_survivor_index = 0;
  _scan_in_progress = _survivors->regions()->is_nonempty();
  _should_abort = false;
}

// ...

    // Verify MarkStackSize is in range.
    if (FLAG_IS_CMDLINE(MarkStackSize)) {
      if (FLAG_IS_DEFAULT(MarkStackSizeMax)) {
        if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
          log_warning(gc)("Invalid value specified for MarkStackSize (" SIZE_FORMAT "): "
                          "must be between 1 and " SIZE_FORMAT,
                          MarkStackSize, MarkStackSizeMax);
          return;
        }
      } else if (FLAG_IS_CMDLINE(MarkStackSizeMax)) {
        if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
          log_warning(gc)("Invalid value specified for MarkStackSize (" SIZE_FORMAT ")"
                          " or for MarkStackSizeMax (" SIZE_FORMAT ")",
                          MarkStackSize, MarkStackSizeMax);
          return;
        }
      }
    }
  }

  if (!_global_mark_stack.initialize(MarkStackSize, MarkStackSizeMax)) {
    vm_exit_during_initialization("Failed to allocate initial concurrent mark overflow mark stack.");
  }

  _tasks = NEW_C_HEAP_ARRAY(G1CMTask*, _max_worker_id, mtGC);
  _accum_task_vtime = NEW_C_HEAP_ARRAY(double, _max_worker_id, mtGC);

  // so that the assertion in MarkingTaskQueue::task_queue doesn't fail
  _active_tasks = _max_worker_id;

  for (uint i = 0; i < _max_worker_id; ++i) {
    G1CMTaskQueue* task_queue = new G1CMTaskQueue();
    task_queue->initialize();
    _task_queues->register_queue(i, task_queue);

    _tasks[i] = new G1CMTask(i, this, task_queue, _task_queues);

    _accum_task_vtime[i] = 0.0;
  }

  // so that the call below can read a sensible value
  _heap_start = g1h->reserved_region().start();
// ...

    // Set the degree of MT processing here. If the discovery was done MT,
    // the number of threads involved during discovery could differ from
    // the number of active workers. This is OK as long as the discovered
    // Reference lists are balanced (see balance_all_queues() and balance_queues()).
    rp->set_active_mt_degree(active_workers);

    // Process the weak references.
    const ReferenceProcessorStats& stats =
        rp->process_discovered_references(&g1_is_alive,
                                          &g1_keep_alive,
                                          &g1_drain_mark_stack,
                                          executor,
                                          _gc_timer_cm);
    _gc_tracer_cm->report_gc_reference_stats(stats);

    // The do_oop work routines of the keep_alive and drain_marking_stack
    // oop closures will set the has_overflown flag if we overflow the
    // global marking stack.

    assert(_global_mark_stack.is_out_of_memory() || _global_mark_stack.is_empty(),
           "mark stack should be empty (unless it overflowed)");

    if (_global_mark_stack.is_out_of_memory()) {
      // This should have been done already when we tried to push an
      // entry on to the global mark stack. But let's do it again.
      set_has_overflown();
    }

    assert(rp->num_q() == active_workers, "Reference queue count should match active workers");

    rp->enqueue_discovered_references(executor);

    rp->verify_no_references_recorded();
    assert(!rp->discovery_enabled(), "Post condition");
  }

  if (has_overflown()) {
    // We cannot trust g1_is_alive if the marking stack overflowed.
    return;
  }

  assert(_global_mark_stack.is_empty(), "Marking should have completed");

// ...

  _real_words_scanned_limit = _words_scanned + words_scanned_period;
  _words_scanned_limit = _real_words_scanned_limit;

  _real_refs_reached_limit = _refs_reached + refs_reached_period;
  _refs_reached_limit = _real_refs_reached_limit;
}

void G1CMTask::decrease_limits() {
  // This is called when we believe that we're going to do an infrequent
  // operation which will increase the per-byte scanned cost (i.e., moving
  // entries to/from the global stack). It basically tries to decrease the
  // scanning limit so that the clock is called earlier.
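  // Since the real limit sits one full period past the last checkpoint,
  // pulling it back by 3/4 of a period makes the clock fire after only a
  // quarter of the normal period.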

  _words_scanned_limit = _real_words_scanned_limit -
    3 * words_scanned_period / 4;
  _refs_reached_limit = _real_refs_reached_limit -
    3 * refs_reached_period / 4;
}

void G1CMTask::move_entries_to_global_stack() {
  // Local array where we'll store the entries that will be popped
  // from the local queue.
  oop buffer[G1CMMarkStack::OopsPerChunk];

  size_t n = 0;
  oop obj;
  while (n < G1CMMarkStack::OopsPerChunk && _task_queue->pop_local(obj)) {
    buffer[n] = obj;
    ++n;
  }
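  // Terminate a partially filled buffer with NULL so that consumers of the
  // chunk (see get_entries_from_global_stack()) know where the entries end.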
  if (n < G1CMMarkStack::OopsPerChunk) {
    buffer[n] = NULL;
  }

  if (n > 0) {
    if (!_cm->mark_stack_push(buffer)) {
      set_has_aborted();
    }
  }

  // This operation was quite expensive, so decrease the limits.
  decrease_limits();
}

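// Returns true if a chunk was popped from the global stack and its entries
// pushed onto the local queue; a NULL entry marks the end of a partially
// filled chunk.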
bool G1CMTask::get_entries_from_global_stack() {
  // Local array where we'll store the entries that will be popped
  // from the global stack.
  oop buffer[G1CMMarkStack::OopsPerChunk];

  if (!_cm->mark_stack_pop(buffer)) {
    return false;
  }

  // We did actually pop at least one entry.
  for (size_t i = 0; i < G1CMMarkStack::OopsPerChunk; ++i) {
    oop elem = buffer[i];
    if (elem == NULL) {
      break;
    }
    bool success = _task_queue->push(elem);
    // We only call this when the local queue is empty or under a
    // given target limit. So, we do not expect this push to fail.
    assert(success, "invariant");
  }

  // This operation was quite expensive, so decrease the limits.
  decrease_limits();
  return true;
}

void G1CMTask::drain_local_queue(bool partially) {
  if (has_aborted()) return;

  // Decide what the target size is, depending on whether we're going to
  // drain it partially (so that other tasks can steal if they run out
  // of things to do) or totally (at the very end).
  size_t target_size;
  if (partially) {
    target_size = MIN2((size_t)_task_queue->max_elems() / 3, GCDrainStackTargetSize);
  } else {
    target_size = 0;
  }

  if (_task_queue->size() > target_size) {
    oop obj;
    bool ret = _task_queue->pop_local(obj);
    while (ret) {
      assert(_g1h->is_in_g1_reserved((HeapWord*) obj), "invariant");
      // ...
      scan_object(obj);

      if (_task_queue->size() <= target_size || has_aborted()) {
        ret = false;
      } else {
        ret = _task_queue->pop_local(obj);
      }
    }
  }
}

void G1CMTask::drain_global_stack(bool partially) {
  if (has_aborted()) return;

  // Our policy is to drain the local queue before we attempt to
  // drain the global stack.
  assert(partially || _task_queue->size() == 0, "invariant");

  // Decide what the target size is, depending on whether we're going to
  // drain it partially (so that other tasks can steal if they run out
  // of things to do) or totally (at the very end).
  // Notice that when draining the global mark stack partially, due to the
  // raciness of the mark stack size update we might in fact drop below the
  // target. But this is not a problem.
  // In case of total draining, we simply process until the global mark stack
  // is totally empty, disregarding the size counter.
  if (partially) {
    size_t const target_size = _cm->partial_mark_stack_size_target();
    while (!has_aborted() && _cm->mark_stack_size() > target_size) {
      if (get_entries_from_global_stack()) {
        drain_local_queue(partially);
      }
    }
  } else {
    while (!has_aborted() && get_entries_from_global_stack()) {
      drain_local_queue(partially);
    }
  }
}

// The SATB queue code makes several assumptions about whether to call the
// par or non-par versions of its methods, which is why some of the code is
// replicated. We should really get rid of the single-threaded version
// of the code to simplify things.
void G1CMTask::drain_satb_buffers() {
  if (has_aborted()) return;

  // We set this so that the regular clock knows that we're in the
  // middle of draining buffers and doesn't set the abort flag when it
  // notices that SATB buffers are available for draining. It'd be
  // very counterproductive if it did that. :-)
  _draining_satb_buffers = true;

  G1CMSATBBufferClosure satb_cl(this, _g1h);
  SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();