119 if (zero_filled) {
120 return;
121 }
122 // We need to clear the bitmap on commit, removing any existing information.
123 MemRegion mr(G1CollectedHeap::heap()->bottom_addr_for_region(start_region), num_regions * HeapRegion::GrainWords);
124 _bm->clear_range(mr);
125 }
126
127 void G1CMBitMap::clear_range(MemRegion mr) {
128 mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize)); // clip mr to the part covered by the bitmap
129 assert(!mr.is_empty(), "unexpected empty region");
130 // convert address range into offset range
131 _bm.at_put_range(heapWordToOffset(mr.start()),
132 heapWordToOffset(mr.end()), false);
133 }
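//
// A hedged sketch (not part of this excerpt) of the heapWordToOffset() mapping
// the two calls above rely on, assuming the usual CMBitMapRO layout of one bit
// per 2^_shifter heap words starting at _bmStartWord:
//
//   size_t heapWordToOffset(const HeapWord* addr) const {
//     return pointer_delta(addr, _bmStartWord) >> _shifter;
//   }
//
// With that mapping, at_put_range() clears exactly the bits that cover
// [mr.start(), mr.end()).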
134
135 G1CMMarkStack::G1CMMarkStack() :
136 _max_chunk_capacity(0),
137 _base(NULL),
138 _chunk_capacity(0),
139 _out_of_memory(false),
140 _should_expand(false) {
141 set_empty();
142 }
143
144 bool G1CMMarkStack::resize(size_t new_capacity) {
145 assert(is_empty(), "Only resize when stack is empty.");
146 assert(new_capacity <= _max_chunk_capacity,
147 "Trying to resize stack to " SIZE_FORMAT " chunks when the maximum is " SIZE_FORMAT, new_capacity, _max_chunk_capacity);
148
149 OopChunk* new_base = MmapArrayAllocator<OopChunk, mtGC>::allocate_or_null(new_capacity);
150
151 if (new_base == NULL) {
152 log_warning(gc)("Failed to reserve memory for new overflow mark stack with " SIZE_FORMAT " chunks and size " SIZE_FORMAT "B.", new_capacity, new_capacity * sizeof(OopChunk));
153 return false;
154 }
155 // Release old mapping.
156 if (_base != NULL) {
157 MmapArrayAllocator<OopChunk, mtGC>::free(_base, _chunk_capacity);
158 }
159
261 return NULL;
262 }
263
264 size_t cur_idx = Atomic::add(1, &_hwm) - 1;
265 if (cur_idx >= _chunk_capacity) {
266 return NULL;
267 }
268
269 OopChunk* result = ::new (&_base[cur_idx]) OopChunk;
270 result->next = NULL;
271 return result;
272 }
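//
// The allocation above is a lock-free bump-pointer allocator: each thread
// claims one slot of the backing array with a single atomic increment of the
// high-water mark. The pattern in isolation (illustrative names, not the
// original code):
//
//   volatile size_t hwm = 0;                  // next unclaimed slot
//   size_t idx = Atomic::add(1, &hwm) - 1;    // claim a slot; add() returns the new value
//   if (idx >= capacity) { /* over-claimed near the end of the array; give up */ }
//
// Note that _hwm may legitimately grow past _chunk_capacity under contention;
// it is only reset back to zero in set_empty().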
273
274 bool G1CMMarkStack::par_push_chunk(oop* ptr_arr) {
275 // Get a new chunk.
276 OopChunk* new_chunk = remove_chunk_from_free_list();
277
278 if (new_chunk == NULL) {
279 // Did not get a chunk from the free list. Allocate from backing memory.
280 new_chunk = allocate_new_chunk();
281 }
282
283 if (new_chunk == NULL) {
284 _out_of_memory = true;
285 return false;
286 }
287
288 Copy::conjoint_memory_atomic(ptr_arr, new_chunk->data, OopsPerChunk * sizeof(oop));
289
290 add_chunk_to_chunk_list(new_chunk);
291
292 return true;
293 }
294
295 bool G1CMMarkStack::par_pop_chunk(oop* ptr_arr) {
296 OopChunk* cur = remove_chunk_from_chunk_list();
297
298 if (cur == NULL) {
299 return false;
300 }
301
302 Copy::conjoint_memory_atomic(cur->data, ptr_arr, OopsPerChunk * sizeof(oop));
303
304 add_chunk_to_free_list(cur);
305 return true;
306 }
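//
// A hedged usage sketch for the pair above (hypothetical caller, not from this
// file): oops move in fixed-size batches of OopsPerChunk, so a local buffer of
// exactly that size is exchanged in both directions:
//
//   oop buffer[G1CMMarkStack::OopsPerChunk];
//   // drain a task-local buffer to the global stack:
//   if (!mark_stack->par_push_chunk(buffer)) {
//     // out of backing memory; the caller must treat this as overflow
//   }
//   // refill from the global stack:
//   if (mark_stack->par_pop_chunk(buffer)) {
//     // process OopsPerChunk entries from buffer
//   }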
307
308 void G1CMMarkStack::set_empty() {
309 _chunks_in_chunk_list = 0;
310 _hwm = 0;
311 clear_out_of_memory();
312 _chunk_list = NULL;
313 _free_list = NULL;
314 }
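//
// Note: set_empty() only resets the indices, flag and list heads; the mmap'ed
// backing array itself is left in place, so the chunk storage is reused across
// marking cycles without remapping.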
315
316 G1CMRootRegions::G1CMRootRegions() :
317 _survivors(NULL), _cm(NULL), _scan_in_progress(false),
318 _should_abort(false), _claimed_survivor_index(0) { }
319
320 void G1CMRootRegions::init(const G1SurvivorRegions* survivors, G1ConcurrentMark* cm) {
321 _survivors = survivors;
322 _cm = cm;
323 }
324
325 void G1CMRootRegions::prepare_for_scan() {
326 assert(!scan_in_progress(), "pre-condition");
327
328 // Currently, only survivors can be root regions.
329 _claimed_survivor_index = 0;
330 _scan_in_progress = _survivors->regions()->is_nonempty();
331 _should_abort = false;
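//
// A hedged sketch of how _claimed_survivor_index is consumed by claim_next(),
// which falls outside this excerpt (details assumed, not verbatim): workers
// claim survivor regions by atomically bumping the index:
//
//   int claimed_index = Atomic::add(1, &_claimed_survivor_index) - 1;
//   const GrowableArray<HeapRegion*>* regions = _survivors->regions();
//   if (claimed_index < regions->length()) {
//     return regions->at(claimed_index);   // this worker owns the region
//   }
//   return NULL;                           // all root regions already claimed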
575 assert(_heap_start != NULL, "heap bounds should look ok");
576 assert(_heap_end != NULL, "heap bounds should look ok");
577 assert(_heap_start < _heap_end, "heap bounds should look ok");
578
579 // Reset all the marking data structures and any necessary flags
580 reset_marking_state();
581
582 // We reset all of them, since different phases will use a
583 // different number of active threads. So it's easiest to have all
584 // of them ready.
585 for (uint i = 0; i < _max_worker_id; ++i) {
586 _tasks[i]->reset(_nextMarkBitMap);
587 }
588
589 // We need this to make sure that the flag is on during the evacuation
590 // pause with the initial mark piggy-backed.
591 set_concurrent_marking_in_progress();
592 }
593
594
595 void G1ConcurrentMark::reset_marking_state(bool clear_overflow) {
596 _global_mark_stack.set_should_expand(has_overflown());
597 _global_mark_stack.set_empty(); // Also clears the mark stack's out-of-memory flag
598 if (clear_overflow) {
599 clear_has_overflown();
600 } else {
601 assert(has_overflown(), "pre-condition");
602 }
603 _finger = _heap_start;
604
605 for (uint i = 0; i < _max_worker_id; ++i) {
606 G1CMTaskQueue* queue = _task_queues->queue(i);
607 queue->set_empty();
608 }
609 }
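//
// The clear_overflow parameter encodes the two call sites' contracts: during a
// concurrent-phase overflow restart the flag is cleared here (see the call
// with /* clear_overflow */ true below), while during the remark pause the
// flag must survive this reset so the pause can be aborted and concurrent
// marking restarted.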
610
611 void G1ConcurrentMark::set_concurrency(uint active_tasks) {
612 assert(active_tasks <= _max_worker_id, "we should not have more");
613
614 _active_tasks = active_tasks;
615 // Need to update the three data structures below according to the
616 // number of active threads for this phase.
617 _terminator = ParallelTaskTerminator((int) active_tasks, _task_queues);
618 _first_overflow_barrier_sync.set_n_workers((int) active_tasks);
619 _second_overflow_barrier_sync.set_n_workers((int) active_tasks);
620 }
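//
// The terminator is re-created by assignment rather than reused because the
// number of participating workers differs between phases; both overflow
// barriers must be kept in agreement with it, since each barrier waits for
// exactly the configured number of workers.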
621
622 void G1ConcurrentMark::set_concurrency_and_phase(uint active_tasks, bool concurrent) {
866 if (barrier_aborted) {
867 // If the barrier aborted we ignore the overflow condition and
868 // just abort the whole marking phase as quickly as possible.
869 return;
870 }
871
872 // If we're executing the concurrent phase of marking, reset the marking
873 // state; otherwise the marking state is reset after reference processing,
874 // during the remark pause.
875 // If we reset here as a result of an overflow during the remark we will
876 // see assertion failures from any subsequent set_concurrency_and_phase()
877 // calls.
878 if (concurrent()) {
880 // let the task associated with worker 0 do this
880 if (worker_id == 0) {
881 // task 0 is responsible for clearing the global data structures
882 // We should be here because of an overflow. During STW we should
883 // not clear the overflow flag since we rely on it being true when
884 // we exit this method to abort the pause and restart concurrent
885 // marking.
886 reset_marking_state(true /* clear_overflow */);
887
888 log_info(gc, marking)("Concurrent Mark reset for overflow");
889 }
890 }
891
892 // after this, each task should reset its own data structures and
893 // then go into the second barrier
894 }
895
896 void G1ConcurrentMark::enter_second_sync_barrier(uint worker_id) {
897 SuspendibleThreadSetLeaver sts_leave(concurrent());
898 _second_overflow_barrier_sync.enter();
899
900 // at this point everything should be re-initialized and ready to go
901 }
902
903 class G1CMConcurrentMarkingTask: public AbstractGangTask {
904 private:
905 G1ConcurrentMark* _cm;
906 ConcurrentMarkThread* _cmt;
1732
1733 // Set the degree of MT processing here. If the discovery was done MT,
1734 // the number of threads involved during discovery could differ from
1735 // the number of active workers. This is OK as long as the discovered
1736 // Reference lists are balanced (see balance_all_queues() and balance_queues()).
1737 rp->set_active_mt_degree(active_workers);
1738
1739 // Process the weak references.
1740 const ReferenceProcessorStats& stats =
1741 rp->process_discovered_references(&g1_is_alive,
1742 &g1_keep_alive,
1743 &g1_drain_mark_stack,
1744 executor,
1745 _gc_timer_cm);
1746 _gc_tracer_cm->report_gc_reference_stats(stats);
1747
1748 // The do_oop work routines of the keep_alive and drain_marking_stack
1749 // oop closures will set the has_overflown flag if we overflow the
1750 // global marking stack.
1751
1752 assert(_global_mark_stack.is_out_of_memory() || _global_mark_stack.is_empty(),
1753 "Mark stack should be empty (unless it is out of memory)");
1754
1755 if (_global_mark_stack.is_out_of_memory()) {
1756 // This should have been done already when we tried to push an
1757 // entry on to the global mark stack. But let's do it again.
1758 set_has_overflown();
1759 }
1760
1762 assert(rp->num_q() == active_workers, "Reference processor queue count should match the number of active workers");
1762
1763 rp->enqueue_discovered_references(executor);
1764
1765 rp->verify_no_references_recorded();
1766 assert(!rp->discovery_enabled(), "Post condition");
1767 }
1768
1769 if (has_overflown()) {
1770 // We cannot trust g1_is_alive if the marking stack overflowed
1771 return;
1772 }
1773
1774 assert(_global_mark_stack.is_empty(), "Marking should have completed");
1775
1776 // Unload Klasses, String, Symbols, Code Cache, etc.
1777 if (ClassUnloadingWithConcurrentMark) {
1778 bool purged_classes;
1779
1780 {
2914
2915 if (_worker_id == 0) {
2916 // let's allow task 0 to do this
2917 if (concurrent()) {
2918 assert(_cm->concurrent_marking_in_progress(), "invariant");
2919 // we need to set this to false before the next
2920 // safepoint. This way we ensure that the marking phase
2921 // doesn't observe any more heap expansions.
2922 _cm->clear_concurrent_marking_in_progress();
2923 }
2924 }
2925
2926 // We can now guarantee that the global stack is empty, since
2927 // all other tasks have finished. We separated the guarantees so
2928 // that, if a condition is false, we can immediately find out
2929 // which one.
2930 guarantee(_cm->out_of_regions(), "only way to reach here");
2931 guarantee(_cm->mark_stack_empty(), "only way to reach here");
2932 guarantee(_task_queue->size() == 0, "only way to reach here");
2933 guarantee(!_cm->has_overflown(), "only way to reach here");
2934 guarantee(!_cm->mark_stack_overflow(), "only way to reach here");
2935 } else {
2936 // Apparently there's more work to do. Let's abort this task. The marking
2937 // loop will restart it, and we can hopefully find more things to do.
2938 set_has_aborted();
2939 }
2940 }
2941
2942 // Mainly for debugging purposes to make sure that a pointer to the
2943 // closure which was statically allocated in this frame doesn't
2944 // escape it by accident.
2945 set_cm_oop_closure(NULL);
2946 double end_time_ms = os::elapsedVTime() * 1000.0;
2947 double elapsed_time_ms = end_time_ms - _start_time_ms;
2948 // Update the step history.
2949 _step_times_ms.add(elapsed_time_ms);
2950
2951 if (has_aborted()) {
2952 // The task was aborted for some reason.
2953 if (_has_timed_out) {
2954 double diff_ms = elapsed_time_ms - _time_target_ms;
// ======== Revised version of the same file (the G1CMMarkStack _out_of_memory flag has been removed) ========
119 if (zero_filled) {
120 return;
121 }
122 // We need to clear the bitmap on commit, removing any existing information.
123 MemRegion mr(G1CollectedHeap::heap()->bottom_addr_for_region(start_region), num_regions * HeapRegion::GrainWords);
124 _bm->clear_range(mr);
125 }
126
127 void G1CMBitMap::clear_range(MemRegion mr) {
128 mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize)); // clip mr to the part covered by the bitmap
129 assert(!mr.is_empty(), "unexpected empty region");
130 // convert address range into offset range
131 _bm.at_put_range(heapWordToOffset(mr.start()),
132 heapWordToOffset(mr.end()), false);
133 }
134
135 G1CMMarkStack::G1CMMarkStack() :
136 _max_chunk_capacity(0),
137 _base(NULL),
138 _chunk_capacity(0),
139 _should_expand(false) {
140 set_empty();
141 }
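//
// In this revision the _out_of_memory member is gone from the initializer
// list: out-of-memory on push is reported solely through par_push_chunk()'s
// return value (see below), so the stack no longer carries a sticky flag of
// its own.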
142
143 bool G1CMMarkStack::resize(size_t new_capacity) {
144 assert(is_empty(), "Only resize when stack is empty.");
145 assert(new_capacity <= _max_chunk_capacity,
146 "Trying to resize stack to " SIZE_FORMAT " chunks when the maximum is " SIZE_FORMAT, new_capacity, _max_chunk_capacity);
147
148 OopChunk* new_base = MmapArrayAllocator<OopChunk, mtGC>::allocate_or_null(new_capacity);
149
150 if (new_base == NULL) {
151 log_warning(gc)("Failed to reserve memory for new overflow mark stack with " SIZE_FORMAT " chunks and size " SIZE_FORMAT "B.", new_capacity, new_capacity * sizeof(OopChunk));
152 return false;
153 }
154 // Release old mapping.
155 if (_base != NULL) {
156 MmapArrayAllocator<OopChunk, mtGC>::free(_base, _chunk_capacity);
157 }
158
260 return NULL;
261 }
262
263 size_t cur_idx = Atomic::add(1, &_hwm) - 1;
264 if (cur_idx >= _chunk_capacity) {
265 return NULL;
266 }
267
268 OopChunk* result = ::new (&_base[cur_idx]) OopChunk;
269 result->next = NULL;
270 return result;
271 }
272
273 bool G1CMMarkStack::par_push_chunk(oop* ptr_arr) {
274 // Get a new chunk.
275 OopChunk* new_chunk = remove_chunk_from_free_list();
276
277 if (new_chunk == NULL) {
278 // Did not get a chunk from the free list. Allocate from backing memory.
279 new_chunk = allocate_new_chunk();
280
281 if (new_chunk == NULL) {
282 return false;
283 }
284 }
285
286 Copy::conjoint_memory_atomic(ptr_arr, new_chunk->data, OopsPerChunk * sizeof(oop));
287
288 add_chunk_to_chunk_list(new_chunk);
289
290 return true;
291 }
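//
// With the sticky flag removed, overflow handling moves to the caller. A
// hedged sketch of the expected call-site shape (illustrative, not from this
// file):
//
//   if (!_global_mark_stack.par_push_chunk(buffer)) {
//     set_has_overflown();   // record overflow at the G1ConcurrentMark level
//   }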
292
293 bool G1CMMarkStack::par_pop_chunk(oop* ptr_arr) {
294 OopChunk* cur = remove_chunk_from_chunk_list();
295
296 if (cur == NULL) {
297 return false;
298 }
299
300 Copy::conjoint_memory_atomic(cur->data, ptr_arr, OopsPerChunk * sizeof(oop));
301
302 add_chunk_to_free_list(cur);
303 return true;
304 }
305
306 void G1CMMarkStack::set_empty() {
307 _chunks_in_chunk_list = 0;
308 _hwm = 0;
309 _chunk_list = NULL;
310 _free_list = NULL;
311 }
312
313 G1CMRootRegions::G1CMRootRegions() :
314 _survivors(NULL), _cm(NULL), _scan_in_progress(false),
315 _should_abort(false), _claimed_survivor_index(0) { }
316
317 void G1CMRootRegions::init(const G1SurvivorRegions* survivors, G1ConcurrentMark* cm) {
318 _survivors = survivors;
319 _cm = cm;
320 }
321
322 void G1CMRootRegions::prepare_for_scan() {
323 assert(!scan_in_progress(), "pre-condition");
324
325 // Currently, only survivors can be root regions.
326 _claimed_survivor_index = 0;
327 _scan_in_progress = _survivors->regions()->is_nonempty();
328 _should_abort = false;
572 assert(_heap_start != NULL, "heap bounds should look ok");
573 assert(_heap_end != NULL, "heap bounds should look ok");
574 assert(_heap_start < _heap_end, "heap bounds should look ok");
575
576 // Reset all the marking data structures and any necessary flags
577 reset_marking_state();
578
579 // We reset all of them, since different phases will use a
580 // different number of active threads. So it's easiest to have all
581 // of them ready.
582 for (uint i = 0; i < _max_worker_id; ++i) {
583 _tasks[i]->reset(_nextMarkBitMap);
584 }
585
586 // We need this to make sure that the flag is on during the evacuation
587 // pause with the initial mark piggy-backed.
588 set_concurrent_marking_in_progress();
589 }
590
591
592 void G1ConcurrentMark::reset_marking_state() {
593 _global_mark_stack.set_should_expand(has_overflown());
594 _global_mark_stack.set_empty();
595 clear_has_overflown();
596 _finger = _heap_start;
597
598 for (uint i = 0; i < _max_worker_id; ++i) {
599 G1CMTaskQueue* queue = _task_queues->queue(i);
600 queue->set_empty();
601 }
602 }
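//
// Compared with the earlier reset_marking_state(bool clear_overflow) variant,
// this revision always clears the overflow flag here; a caller that needs the
// flag to survive the reset can no longer route through this method.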
603
604 void G1ConcurrentMark::set_concurrency(uint active_tasks) {
605 assert(active_tasks <= _max_worker_id, "we should not have more");
606
607 _active_tasks = active_tasks;
608 // Need to update the three data structures below according to the
609 // number of active threads for this phase.
610 _terminator = ParallelTaskTerminator((int) active_tasks, _task_queues);
611 _first_overflow_barrier_sync.set_n_workers((int) active_tasks);
612 _second_overflow_barrier_sync.set_n_workers((int) active_tasks);
613 }
614
615 void G1ConcurrentMark::set_concurrency_and_phase(uint active_tasks, bool concurrent) {
859 if (barrier_aborted) {
860 // If the barrier aborted we ignore the overflow condition and
861 // just abort the whole marking phase as quickly as possible.
862 return;
863 }
864
865 // If we're executing the concurrent phase of marking, reset the marking
866 // state; otherwise the marking state is reset after reference processing,
867 // during the remark pause.
868 // If we reset here as a result of an overflow during the remark we will
869 // see assertion failures from any subsequent set_concurrency_and_phase()
870 // calls.
871 if (concurrent()) {
873 // let the task associated with worker 0 do this
873 if (worker_id == 0) {
874 // task 0 is responsible for clearing the global data structures
875 // We should be here because of an overflow. During STW we should
876 // not clear the overflow flag since we rely on it being true when
877 // we exit this method to abort the pause and restart concurrent
878 // marking.
879 reset_marking_state();
880
881 log_info(gc, marking)("Concurrent Mark reset for overflow");
882 }
883 }
884
885 // after this, each task should reset its own data structures and
886 // then go into the second barrier
887 }
888
889 void G1ConcurrentMark::enter_second_sync_barrier(uint worker_id) {
890 SuspendibleThreadSetLeaver sts_leave(concurrent());
891 _second_overflow_barrier_sync.enter();
892
893 // at this point everything should be re-initialized and ready to go
894 }
895
896 class G1CMConcurrentMarkingTask: public AbstractGangTask {
897 private:
898 G1ConcurrentMark* _cm;
899 ConcurrentMarkThread* _cmt;
1725
1726 // Set the degree of MT processing here. If the discovery was done MT,
1727 // the number of threads involved during discovery could differ from
1728 // the number of active workers. This is OK as long as the discovered
1729 // Reference lists are balanced (see balance_all_queues() and balance_queues()).
1730 rp->set_active_mt_degree(active_workers);
1731
1732 // Process the weak references.
1733 const ReferenceProcessorStats& stats =
1734 rp->process_discovered_references(&g1_is_alive,
1735 &g1_keep_alive,
1736 &g1_drain_mark_stack,
1737 executor,
1738 _gc_timer_cm);
1739 _gc_tracer_cm->report_gc_reference_stats(stats);
1740
1741 // The do_oop work routines of the keep_alive and drain_marking_stack
1742 // oop closures will set the has_overflown flag if we overflow the
1743 // global marking stack.
1744
1745 assert(has_overflown() || _global_mark_stack.is_empty(),
1746 "Mark stack should be empty (unless it has overflown)");
1747
1748 assert(rp->num_q() == active_workers, "Reference processor queue count should match the number of active workers");
1749
1750 rp->enqueue_discovered_references(executor);
1751
1752 rp->verify_no_references_recorded();
1753 assert(!rp->discovery_enabled(), "Post condition");
1754 }
1755
1756 if (has_overflown()) {
1757 // We cannot trust g1_is_alive if the marking stack overflowed
1758 return;
1759 }
1760
1761 assert(_global_mark_stack.is_empty(), "Marking should have completed");
1762
1763 // Unload Klasses, String, Symbols, Code Cache, etc.
1764 if (ClassUnloadingWithConcurrentMark) {
1765 bool purged_classes;
1766
1767 {
2901
2902 if (_worker_id == 0) {
2903 // let's allow task 0 to do this
2904 if (concurrent()) {
2905 assert(_cm->concurrent_marking_in_progress(), "invariant");
2906 // we need to set this to false before the next
2907 // safepoint. This way we ensure that the marking phase
2908 // doesn't observe any more heap expansions.
2909 _cm->clear_concurrent_marking_in_progress();
2910 }
2911 }
2912
2913 // We can now guarantee that the global stack is empty, since
2914 // all other tasks have finished. We separated the guarantees so
2915 // that, if a condition is false, we can immediately find out
2916 // which one.
2917 guarantee(_cm->out_of_regions(), "only way to reach here");
2918 guarantee(_cm->mark_stack_empty(), "only way to reach here");
2919 guarantee(_task_queue->size() == 0, "only way to reach here");
2920 guarantee(!_cm->has_overflown(), "only way to reach here");
2921 } else {
2922 // Apparently there's more work to do. Let's abort this task. The marking
2923 // loop will restart it, and we can hopefully find more things to do.
2924 set_has_aborted();
2925 }
2926 }
2927
2928 // Mainly for debugging purposes to make sure that a pointer to the
2929 // closure which was statically allocated in this frame doesn't
2930 // escape it by accident.
2931 set_cm_oop_closure(NULL);
2932 double end_time_ms = os::elapsedVTime() * 1000.0;
2933 double elapsed_time_ms = end_time_ms - _start_time_ms;
2934 // Update the step history.
2935 _step_times_ms.add(elapsed_time_ms);
2936
2937 if (has_aborted()) {
2938 // The task was aborted for some reason.
2939 if (_has_timed_out) {
2940 double diff_ms = elapsed_time_ms - _time_target_ms;