2161 } else {
2162 assert(_cm_oop_closure == NULL, "invariant");
2163 }
2164 _cm_oop_closure = cm_oop_closure;
2165 }
2166
// Prepare this task for a new marking cycle: install the bitmap that the
// upcoming cycle will mark into, and clear all per-cycle state.
void G1CMTask::reset(G1CMBitMap* next_mark_bitmap) {
  guarantee(next_mark_bitmap != NULL, "invariant");
  _next_mark_bitmap = next_mark_bitmap;
  clear_region_fields();

  // Per-cycle counters and timers start from scratch.
  _calls = 0;
  _elapsed_time_ms = 0.0;
  _termination_time_ms = 0.0;
  _termination_start_time_ms = 0.0;

  _mark_stats_cache.reset();
}
2179
2180 bool G1CMTask::should_exit_termination() {
2181 regular_clock_call();
2182 // This is called when we are in the termination protocol. We should
2183 // quit if, for some reason, this task wants to abort or the global
2184 // stack is not empty (this means that we can get work from it).
2185 return !_cm->mark_stack_empty() || has_aborted();
2186 }
2187
// Called by the scanning code once the current words-scanned or
// refs-reached budget has been exhausted; gives the regular clock a
// chance to run the global abort checks and re-arm the limits.
void G1CMTask::reached_limit() {
  assert(_words_scanned >= _words_scanned_limit ||
         _refs_reached >= _refs_reached_limit ,
         "shouldn't have been called otherwise");
  regular_clock_call();
}
2194
// The periodic "clock" invoked during scanning. Checks the global
// conditions under which this task must stop what it is doing —
// mark-stack overflow, marking aborted for Full GC, a pending yield
// request, the time quota being exceeded, or completed SATB buffers
// waiting to be processed — and flags the task via set_has_aborted()
// when any of them holds.
void G1CMTask::regular_clock_call() {
  if (has_aborted()) {
    return;
  }

  // First, we need to recalculate the words scanned and refs reached
  // limits for the next clock call.
  recalculate_limits();

  // During the regular clock call we do the following

  // (1) If an overflow has been flagged, then we abort.
  if (_cm->has_overflown()) {
    set_has_aborted();
    return;
  }

  // If we are not concurrent (i.e. we're doing remark) we don't need
  // to check anything else. The other steps are only needed during
  // the concurrent marking phase.
  if (!_cm->concurrent()) {
    return;
  }

  // (2) If marking has been aborted for Full GC, then we also abort.
  if (_cm->has_aborted()) {
    set_has_aborted();
    return;
  }

  double curr_time_ms = os::elapsedVTime() * 1000.0;

  // (4) We check whether we should yield. If we have to, then we abort.
  if (SuspendibleThreadSet::should_yield()) {
    // We should yield. To do this we abort the task. The caller is
    // responsible for yielding.
    set_has_aborted();
    return;
  }

  // (5) We check whether we've reached our time quota. If we have,
  // then we abort.
  double elapsed_time_ms = curr_time_ms - _start_time_ms;
  if (elapsed_time_ms > _time_target_ms) {
    set_has_aborted();
    _has_timed_out = true;
    return;
  }

  // (6) Finally, we check whether there are enough completed SATB
  // buffers available for processing. If there are, we abort.
  SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
  if (!_draining_satb_buffers && satb_mq_set.process_completed_buffers()) {
    // we do need to process SATB buffers, we'll abort and restart
    // the marking task to do so
    set_has_aborted();
    return;
  }
}
2254
2255 void G1CMTask::recalculate_limits() {
2256 _real_words_scanned_limit = _words_scanned + words_scanned_period;
2257 _words_scanned_limit = _real_words_scanned_limit;
2258
2259 _real_refs_reached_limit = _refs_reached + refs_reached_period;
2260 _refs_reached_limit = _real_refs_reached_limit;
2261 }
2262
void G1CMTask::decrease_limits() {
  // This is called when we believe that we're going to do an infrequent
  // operation which will increase the per byte scanned cost (i.e. move
  // entries to/from the global stack). It basically tries to decrease the
  // scanning limit so that the clock is called earlier.

  // Pull both working limits back to one quarter of a period beyond the
  // start of the current period, so the next clock call fires sooner
  // than it otherwise would.
  _words_scanned_limit = _real_words_scanned_limit - 3 * words_scanned_period / 4;
  _refs_reached_limit = _real_refs_reached_limit - 3 * refs_reached_period / 4;
}
2272
2387 // replicated. We should really get rid of the single-threaded version
2388 // of the code to simplify things.
// Claim and process completed SATB buffers until none remain or this
// task aborts. Temporarily sets _draining_satb_buffers so the regular
// clock does not treat the very buffers we are draining as a reason to
// abort.
void G1CMTask::drain_satb_buffers() {
  if (has_aborted()) {
    return;
  }

  // We set this so that the regular clock knows that we're in the
  // middle of draining buffers and doesn't set the abort flag when it
  // notices that SATB buffers are available for draining. It'd be
  // very counter productive if it did that. :-)
  _draining_satb_buffers = true;

  G1CMSATBBufferClosure satb_cl(this, _g1h);
  SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();

  // This keeps claiming and applying the closure to completed buffers
  // until we run out of buffers or we need to abort.
  while (!has_aborted() &&
         satb_mq_set.apply_closure_to_completed_buffer(&satb_cl)) {
    // Run the global abort checks after each buffer; the clock may set
    // the abort flag, which terminates this loop.
    regular_clock_call();
  }

  _draining_satb_buffers = false;

  // Unless we aborted part-way through, remark (the non-concurrent
  // phase) must have fully drained the queue set.
  assert(has_aborted() ||
         _cm->concurrent() ||
         satb_mq_set.completed_buffers_num() == 0, "invariant");

  // again, this was a potentially expensive operation, decrease the
  // limits to get the regular clock call early
  decrease_limits();
}
2420
// Discard the cached mark statistics entry for the given region index.
void G1CMTask::clear_mark_stats_cache(uint region_idx) {
  _mark_stats_cache.reset(region_idx);
}
2424
// Evict all cached per-region mark statistics back to the global table.
// Returns the pair produced by evict_all() — presumably cache hit/miss
// totals; confirm against G1RegionMarkStatsCache.
Pair<size_t, size_t> G1CMTask::flush_mark_stats_cache() {
  return _mark_stats_cache.evict_all();
}
2630 // We will start from _finger not from the start of the region,
2631 // as we might be restarting this task after aborting half-way
2632 // through scanning this region. In this case, _finger points to
2633 // the address where we last found a marked object. If this is a
2634 // fresh region, _finger points to start().
2635 MemRegion mr = MemRegion(_finger, _region_limit);
2636
2637 assert(!_curr_region->is_humongous() || mr.start() == _curr_region->bottom(),
2638 "humongous regions should go around loop once only");
2639
2640 // Some special cases:
2641 // If the memory region is empty, we can just give up the region.
2642 // If the current region is humongous then we only need to check
2643 // the bitmap for the bit associated with the start of the object,
2644 // scan the object if it's live, and give up the region.
2645 // Otherwise, let's iterate over the bitmap of the part of the region
2646 // that is left.
2647 // If the iteration is successful, give up the region.
2648 if (mr.is_empty()) {
2649 giveup_current_region();
2650 regular_clock_call();
2651 } else if (_curr_region->is_humongous() && mr.start() == _curr_region->bottom()) {
2652 if (_next_mark_bitmap->is_marked(mr.start())) {
2653 // The object is marked - apply the closure
2654 bitmap_closure.do_addr(mr.start());
2655 }
2656 // Even if this task aborted while scanning the humongous object
2657 // we can (and should) give up the current region.
2658 giveup_current_region();
2659 regular_clock_call();
2660 } else if (_next_mark_bitmap->iterate(&bitmap_closure, mr)) {
2661 giveup_current_region();
2662 regular_clock_call();
2663 } else {
2664 assert(has_aborted(), "currently the only way to do so");
2665 // The only way to abort the bitmap iteration is to return
2666 // false from the do_bit() method. However, inside the
2667 // do_bit() method we move the _finger to point to the
2668 // object currently being looked at. So, if we bail out, we
2669 // have definitely set _finger to something non-null.
2670 assert(_finger != NULL, "invariant");
2671
2672 // Region iteration was actually aborted. So now _finger
2673 // points to the address of the object we last scanned. If we
2674 // leave it there, when we restart this task, we will rescan
2675 // the object. It is easy to avoid this. We move the finger by
2676 // enough to point to the next possible object header.
2677 assert(_finger < _region_limit, "invariant");
2678 HeapWord* const new_finger = _finger + ((oop)_finger)->size();
2679 // Check if bitmap iteration was aborted while scanning the last object
2680 if (new_finger >= _region_limit) {
2681 giveup_current_region();
2682 } else {
2697 // claiming and why we have to check out_of_regions() to determine
2698 // whether we're done or not.
2699 while (!has_aborted() && _curr_region == NULL && !_cm->out_of_regions()) {
2700 // We are going to try to claim a new region. We should have
2701 // given up on the previous one.
2702 // Separated the asserts so that we know which one fires.
2703 assert(_curr_region == NULL, "invariant");
2704 assert(_finger == NULL, "invariant");
2705 assert(_region_limit == NULL, "invariant");
2706 HeapRegion* claimed_region = _cm->claim_region(_worker_id);
2707 if (claimed_region != NULL) {
2708 // Yes, we managed to claim one
2709 setup_for_region(claimed_region);
2710 assert(_curr_region == claimed_region, "invariant");
2711 }
2712 // It is important to call the regular clock here. It might take
2713 // a while to claim a region if, for example, we hit a large
2714 // block of empty regions. So we need to call the regular clock
2715 // method once round the loop to make sure it's called
2716 // frequently enough.
2717 regular_clock_call();
2718 }
2719
2720 if (!has_aborted() && _curr_region == NULL) {
2721 assert(_cm->out_of_regions(),
2722 "at this point we should be out of regions");
2723 }
2724 } while ( _curr_region != NULL && !has_aborted());
2725
2726 if (!has_aborted()) {
2727 // We cannot check whether the global stack is empty, since other
2728 // tasks might be pushing objects to it concurrently.
2729 assert(_cm->out_of_regions(),
2730 "at this point we should be out of regions");
2731 // Try to reduce the number of available SATB buffers so that
2732 // remark has less work to do.
2733 drain_satb_buffers();
2734 }
2735
2736 // Since we've done everything else, we can now totally drain the
2737 // local queue and global stack.
2775 // The G1CMTask class also extends the TerminatorTerminator class,
2776 // hence its should_exit_termination() method will also decide
2777 // whether to exit the termination protocol or not.
2778 bool finished = (is_serial ||
2779 _cm->terminator()->offer_termination(this));
2780 double termination_end_time_ms = os::elapsedVTime() * 1000.0;
2781 _termination_time_ms +=
2782 termination_end_time_ms - _termination_start_time_ms;
2783
2784 if (finished) {
2785 // We're all done.
2786
2787 // We can now guarantee that the global stack is empty, since
2788 // all other tasks have finished. We separated the guarantees so
2789 // that, if a condition is false, we can immediately find out
2790 // which one.
2791 guarantee(_cm->out_of_regions(), "only way to reach here");
2792 guarantee(_cm->mark_stack_empty(), "only way to reach here");
2793 guarantee(_task_queue->size() == 0, "only way to reach here");
2794 guarantee(!_cm->has_overflown(), "only way to reach here");
2795 } else {
2796 // Apparently there's more work to do. Let's abort this task. It
2797 // will restart it and we can hopefully find more things to do.
2798 set_has_aborted();
2799 }
2800 }
2801
2802 // Mainly for debugging purposes to make sure that a pointer to the
2803 // closure which was statically allocated in this frame doesn't
2804 // escape it by accident.
2805 set_cm_oop_closure(NULL);
2806 double end_time_ms = os::elapsedVTime() * 1000.0;
2807 double elapsed_time_ms = end_time_ms - _start_time_ms;
2808 // Update the step history.
2809 _step_times_ms.add(elapsed_time_ms);
2810
2811 if (has_aborted()) {
2812 // The task was aborted for some reason.
2813 if (_has_timed_out) {
2814 double diff_ms = elapsed_time_ms - _time_target_ms;
|
2161 } else {
2162 assert(_cm_oop_closure == NULL, "invariant");
2163 }
2164 _cm_oop_closure = cm_oop_closure;
2165 }
2166
2167 void G1CMTask::reset(G1CMBitMap* next_mark_bitmap) {
2168 guarantee(next_mark_bitmap != NULL, "invariant");
2169 _next_mark_bitmap = next_mark_bitmap;
2170 clear_region_fields();
2171
2172 _calls = 0;
2173 _elapsed_time_ms = 0.0;
2174 _termination_time_ms = 0.0;
2175 _termination_start_time_ms = 0.0;
2176
2177 _mark_stats_cache.reset();
2178 }
2179
2180 bool G1CMTask::should_exit_termination() {
2181 if (!regular_clock_call()) {
2182 return true;
2183 }
2184
2185 // This is called when we are in the termination protocol. We should
2186 // quit if, for some reason, this task wants to abort or the global
2187 // stack is not empty (this means that we can get work from it).
2188 return !_cm->mark_stack_empty() || has_aborted();
2189 }
2190
// Called by the scanning code once the current words-scanned or
// refs-reached budget has been exhausted. Runs the regular clock and
// aborts the task when the clock reports that we should stop.
void G1CMTask::reached_limit() {
  assert(_words_scanned >= _words_scanned_limit ||
         _refs_reached >= _refs_reached_limit ,
         "shouldn't have been called otherwise");
  if (!regular_clock_call()) {
    set_has_aborted();
  }
}
2199
// The periodic "clock" invoked during scanning. Checks the global
// conditions under which this task should stop what it is doing:
// mark-stack overflow, marking aborted for Full GC, a pending yield
// request, the time quota being exceeded, or completed SATB buffers
// waiting to be processed.
// Returns false when the caller should abort the current task (callers
// are responsible for invoking set_has_aborted() themselves), true when
// it is fine to keep going.
bool G1CMTask::regular_clock_call() {
  if (has_aborted()) {
    return false;
  }

  // First, we need to recalculate the words scanned and refs reached
  // limits for the next clock call.
  recalculate_limits();

  // During the regular clock call we do the following

  // (1) If an overflow has been flagged, then we abort.
  if (_cm->has_overflown()) {
    return false;
  }

  // If we are not concurrent (i.e. we're doing remark) we don't need
  // to check anything else. The other steps are only needed during
  // the concurrent marking phase.
  if (!_cm->concurrent()) {
    return true;
  }

  // (2) If marking has been aborted for Full GC, then we also abort.
  if (_cm->has_aborted()) {
    return false;
  }

  double curr_time_ms = os::elapsedVTime() * 1000.0;

  // (4) We check whether we should yield. If we have to, then we abort.
  if (SuspendibleThreadSet::should_yield()) {
    // We should yield. We signal this by returning false; the caller is
    // responsible for aborting the task and for yielding.
    return false;
  }

  // (5) We check whether we've reached our time quota. If we have,
  // then we abort.
  double elapsed_time_ms = curr_time_ms - _start_time_ms;
  if (elapsed_time_ms > _time_target_ms) {
    _has_timed_out = true;
    return false;
  }

  // (6) Finally, we check whether there are enough completed SATB
  // buffers available for processing. If there are, we abort.
  SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
  if (!_draining_satb_buffers && satb_mq_set.process_completed_buffers()) {
    // We do need to process SATB buffers; signalling an abort here lets
    // the marking task restart and do so.
    return false;
  }
  return true;
}
2255
2256 void G1CMTask::recalculate_limits() {
2257 _real_words_scanned_limit = _words_scanned + words_scanned_period;
2258 _words_scanned_limit = _real_words_scanned_limit;
2259
2260 _real_refs_reached_limit = _refs_reached + refs_reached_period;
2261 _refs_reached_limit = _real_refs_reached_limit;
2262 }
2263
void G1CMTask::decrease_limits() {
  // This is called when we believe that we're going to do an infrequent
  // operation which will increase the per byte scanned cost (i.e. move
  // entries to/from the global stack). It basically tries to decrease the
  // scanning limit so that the clock is called earlier.

  // Pull both working limits back to one quarter of a period beyond the
  // start of the current period, so the next clock call fires sooner
  // than it otherwise would.
  _words_scanned_limit = _real_words_scanned_limit - 3 * words_scanned_period / 4;
  _refs_reached_limit = _real_refs_reached_limit - 3 * refs_reached_period / 4;
}
2273
2388 // replicated. We should really get rid of the single-threaded version
2389 // of the code to simplify things.
2390 void G1CMTask::drain_satb_buffers() {
2391 if (has_aborted()) {
2392 return;
2393 }
2394
2395 // We set this so that the regular clock knows that we're in the
2396 // middle of draining buffers and doesn't set the abort flag when it
2397 // notices that SATB buffers are available for draining. It'd be
2398 // very counter productive if it did that. :-)
2399 _draining_satb_buffers = true;
2400
2401 G1CMSATBBufferClosure satb_cl(this, _g1h);
2402 SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
2403
2404 // This keeps claiming and applying the closure to completed buffers
2405 // until we run out of buffers or we need to abort.
2406 while (!has_aborted() &&
2407 satb_mq_set.apply_closure_to_completed_buffer(&satb_cl)) {
2408 if(!regular_clock_call()) {
2409 set_has_aborted();
2410 }
2411 }
2412
2413 _draining_satb_buffers = false;
2414
2415 assert(has_aborted() ||
2416 _cm->concurrent() ||
2417 satb_mq_set.completed_buffers_num() == 0, "invariant");
2418
2419 // again, this was a potentially expensive operation, decrease the
2420 // limits to get the regular clock call early
2421 decrease_limits();
2422 }
2423
// Discard the cached mark statistics entry for the given region index.
void G1CMTask::clear_mark_stats_cache(uint region_idx) {
  _mark_stats_cache.reset(region_idx);
}
2427
// Evict all cached per-region mark statistics back to the global table.
// Returns the pair produced by evict_all() — presumably cache hit/miss
// totals; confirm against G1RegionMarkStatsCache.
Pair<size_t, size_t> G1CMTask::flush_mark_stats_cache() {
  return _mark_stats_cache.evict_all();
}
2633 // We will start from _finger not from the start of the region,
2634 // as we might be restarting this task after aborting half-way
2635 // through scanning this region. In this case, _finger points to
2636 // the address where we last found a marked object. If this is a
2637 // fresh region, _finger points to start().
2638 MemRegion mr = MemRegion(_finger, _region_limit);
2639
2640 assert(!_curr_region->is_humongous() || mr.start() == _curr_region->bottom(),
2641 "humongous regions should go around loop once only");
2642
2643 // Some special cases:
2644 // If the memory region is empty, we can just give up the region.
2645 // If the current region is humongous then we only need to check
2646 // the bitmap for the bit associated with the start of the object,
2647 // scan the object if it's live, and give up the region.
2648 // Otherwise, let's iterate over the bitmap of the part of the region
2649 // that is left.
2650 // If the iteration is successful, give up the region.
2651 if (mr.is_empty()) {
2652 giveup_current_region();
2653 if (!regular_clock_call()) {
2654 set_has_aborted();
2655 }
2656 } else if (_curr_region->is_humongous() && mr.start() == _curr_region->bottom()) {
2657 if (_next_mark_bitmap->is_marked(mr.start())) {
2658 // The object is marked - apply the closure
2659 bitmap_closure.do_addr(mr.start());
2660 }
2661 // Even if this task aborted while scanning the humongous object
2662 // we can (and should) give up the current region.
2663 giveup_current_region();
2664 if (!regular_clock_call()) {
2665 set_has_aborted();
2666 }
2667 } else if (_next_mark_bitmap->iterate(&bitmap_closure, mr)) {
2668 giveup_current_region();
2669 if (!regular_clock_call()) {
2670 set_has_aborted();
2671 }
2672 } else {
2673 assert(has_aborted(), "currently the only way to do so");
2674 // The only way to abort the bitmap iteration is to return
2675 // false from the do_bit() method. However, inside the
2676 // do_bit() method we move the _finger to point to the
2677 // object currently being looked at. So, if we bail out, we
2678 // have definitely set _finger to something non-null.
2679 assert(_finger != NULL, "invariant");
2680
2681 // Region iteration was actually aborted. So now _finger
2682 // points to the address of the object we last scanned. If we
2683 // leave it there, when we restart this task, we will rescan
2684 // the object. It is easy to avoid this. We move the finger by
2685 // enough to point to the next possible object header.
2686 assert(_finger < _region_limit, "invariant");
2687 HeapWord* const new_finger = _finger + ((oop)_finger)->size();
2688 // Check if bitmap iteration was aborted while scanning the last object
2689 if (new_finger >= _region_limit) {
2690 giveup_current_region();
2691 } else {
2706 // claiming and why we have to check out_of_regions() to determine
2707 // whether we're done or not.
2708 while (!has_aborted() && _curr_region == NULL && !_cm->out_of_regions()) {
2709 // We are going to try to claim a new region. We should have
2710 // given up on the previous one.
2711 // Separated the asserts so that we know which one fires.
2712 assert(_curr_region == NULL, "invariant");
2713 assert(_finger == NULL, "invariant");
2714 assert(_region_limit == NULL, "invariant");
2715 HeapRegion* claimed_region = _cm->claim_region(_worker_id);
2716 if (claimed_region != NULL) {
2717 // Yes, we managed to claim one
2718 setup_for_region(claimed_region);
2719 assert(_curr_region == claimed_region, "invariant");
2720 }
2721 // It is important to call the regular clock here. It might take
2722 // a while to claim a region if, for example, we hit a large
2723 // block of empty regions. So we need to call the regular clock
2724 // method once round the loop to make sure it's called
2725 // frequently enough.
2726 if (!regular_clock_call()) {
2727 set_has_aborted();
2728 }
2729 }
2730
2731 if (!has_aborted() && _curr_region == NULL) {
2732 assert(_cm->out_of_regions(),
2733 "at this point we should be out of regions");
2734 }
2735 } while ( _curr_region != NULL && !has_aborted());
2736
2737 if (!has_aborted()) {
2738 // We cannot check whether the global stack is empty, since other
2739 // tasks might be pushing objects to it concurrently.
2740 assert(_cm->out_of_regions(),
2741 "at this point we should be out of regions");
2742 // Try to reduce the number of available SATB buffers so that
2743 // remark has less work to do.
2744 drain_satb_buffers();
2745 }
2746
2747 // Since we've done everything else, we can now totally drain the
2748 // local queue and global stack.
2786 // The G1CMTask class also extends the TerminatorTerminator class,
2787 // hence its should_exit_termination() method will also decide
2788 // whether to exit the termination protocol or not.
2789 bool finished = (is_serial ||
2790 _cm->terminator()->offer_termination(this));
2791 double termination_end_time_ms = os::elapsedVTime() * 1000.0;
2792 _termination_time_ms +=
2793 termination_end_time_ms - _termination_start_time_ms;
2794
2795 if (finished) {
2796 // We're all done.
2797
2798 // We can now guarantee that the global stack is empty, since
2799 // all other tasks have finished. We separated the guarantees so
2800 // that, if a condition is false, we can immediately find out
2801 // which one.
2802 guarantee(_cm->out_of_regions(), "only way to reach here");
2803 guarantee(_cm->mark_stack_empty(), "only way to reach here");
2804 guarantee(_task_queue->size() == 0, "only way to reach here");
2805 guarantee(!_cm->has_overflown(), "only way to reach here");
2806 guarantee(!has_aborted(), "should never happen if termination is completed");
2807 } else {
2808 // Apparently there's more work to do. Let's abort this task. It
2809 // will restart it and we can hopefully find more things to do.
2810 set_has_aborted();
2811 }
2812 }
2813
2814 // Mainly for debugging purposes to make sure that a pointer to the
2815 // closure which was statically allocated in this frame doesn't
2816 // escape it by accident.
2817 set_cm_oop_closure(NULL);
2818 double end_time_ms = os::elapsedVTime() * 1000.0;
2819 double elapsed_time_ms = end_time_ms - _start_time_ms;
2820 // Update the step history.
2821 _step_times_ms.add(elapsed_time_ms);
2822
2823 if (has_aborted()) {
2824 // The task was aborted for some reason.
2825 if (_has_timed_out) {
2826 double diff_ms = elapsed_time_ms - _time_target_ms;
|