327 }
328
329 bool CMMarkStack::par_pop_arr(oop* ptr_arr, int max, int* n) {
330 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
331 jint index = _index;
332 if (index == 0) {
333 *n = 0;
334 return false;
335 } else {
336 int k = MIN2(max, index);
337 jint new_ind = index - k;
338 for (int j = 0; j < k; j++) {
339 ptr_arr[j] = _base[new_ind + j];
340 }
341 _index = new_ind;
342 *n = k;
343 return true;
344 }
345 }
346
347 template<class OopClosureClass>
348 bool CMMarkStack::drain(OopClosureClass* cl, CMBitMap* bm, bool yield_after) {
349 assert(!_drain_in_progress || !_drain_in_progress_yields || yield_after
350 || SafepointSynchronize::is_at_safepoint(),
351 "Drain recursion must be yield-safe.");
352 bool res = true;
353 debug_only(_drain_in_progress = true);
354 debug_only(_drain_in_progress_yields = yield_after);
355 while (!isEmpty()) {
356 oop newOop = pop();
357 assert(G1CollectedHeap::heap()->is_in_reserved(newOop), "Bad pop");
358 assert(newOop->is_oop(), "Expected an oop");
359 assert(bm == NULL || bm->isMarked((HeapWord*)newOop),
360 "only grey objects on this stack");
361 newOop->oop_iterate(cl);
362 if (yield_after && _cm->do_yield_check()) {
363 res = false;
364 break;
365 }
366 }
367 debug_only(_drain_in_progress = false);
368 return res;
369 }
370
// Snapshot the stack's current index at the start of a GC pause so that
// note_end_of_gc() can later verify the stack was not grown during the
// pause. The assert enforces strict start/end bracketing (_saved_index
// must have been reset to -1 by the previous end-of-GC).
void CMMarkStack::note_start_of_gc() {
  assert(_saved_index == -1,
         "note_start_of_gc()/end_of_gc() bracketed incorrectly");
  _saved_index = _index;
}
376
377 void CMMarkStack::note_end_of_gc() {
378 // This is intentionally a guarantee, instead of an assert. If we
379 // accidentally add something to the mark stack during GC, it
380 // will be a correctness issue so it's better if we crash. we'll
381 // only check this once per GC anyway, so it won't be a performance
|
327 }
328
329 bool CMMarkStack::par_pop_arr(oop* ptr_arr, int max, int* n) {
330 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
331 jint index = _index;
332 if (index == 0) {
333 *n = 0;
334 return false;
335 } else {
336 int k = MIN2(max, index);
337 jint new_ind = index - k;
338 for (int j = 0; j < k; j++) {
339 ptr_arr[j] = _base[new_ind + j];
340 }
341 _index = new_ind;
342 *n = k;
343 return true;
344 }
345 }
346
347 template<bool nv, typename OopClosureClass>
348 bool CMMarkStack::drain(OopClosureClass* cl, CMBitMap* bm, bool yield_after) {
349 assert(!_drain_in_progress || !_drain_in_progress_yields || yield_after
350 || SafepointSynchronize::is_at_safepoint(),
351 "Drain recursion must be yield-safe.");
352 bool res = true;
353 debug_only(_drain_in_progress = true);
354 debug_only(_drain_in_progress_yields = yield_after);
355 while (!isEmpty()) {
356 oop newOop = pop();
357 assert(G1CollectedHeap::heap()->is_in_reserved(newOop), "Bad pop");
358 assert(newOop->is_oop(), "Expected an oop");
359 assert(bm == NULL || bm->isMarked((HeapWord*)newOop),
360 "only grey objects on this stack");
361 newOop->oop_iterate<nv>(cl);
362 if (yield_after && _cm->do_yield_check()) {
363 res = false;
364 break;
365 }
366 }
367 debug_only(_drain_in_progress = false);
368 return res;
369 }
370
// Snapshot the stack's current index at the start of a GC pause so that
// note_end_of_gc() can later verify the stack was not grown during the
// pause. The assert enforces strict start/end bracketing (_saved_index
// must have been reset to -1 by the previous end-of-GC).
void CMMarkStack::note_start_of_gc() {
  assert(_saved_index == -1,
         "note_start_of_gc()/end_of_gc() bracketed incorrectly");
  _saved_index = _index;
}
376
377 void CMMarkStack::note_end_of_gc() {
378 // This is intentionally a guarantee, instead of an assert. If we
379 // accidentally add something to the mark stack during GC, it
380 // will be a correctness issue so it's better if we crash. we'll
381 // only check this once per GC anyway, so it won't be a performance
|