    _next_top_at_mark_start = top();
    _next_marked_bytes = 0;
  } else if (during_conc_mark) {
    // During concurrent mark, all objects in the CSet (including
    // the ones we find to be self-forwarded) are implicitly live.
    // So all objects need to be above NTAMS.
    _next_top_at_mark_start = bottom();
    _next_marked_bytes = 0;
  }
}

void HeapRegion::note_self_forwarding_removal_end(bool during_initial_mark,
                                                  bool during_conc_mark,
                                                  size_t marked_bytes) {
  assert(marked_bytes <= used(),
         "marked: " SIZE_FORMAT " used: " SIZE_FORMAT, marked_bytes, used());
  _prev_top_at_mark_start = top();
  _prev_marked_bytes = marked_bytes;
}

// Humongous objects are allocated directly in the old-gen. Need
// special handling for concurrent processing encountering an
// in-progress allocation.
static bool do_oops_on_card_in_humongous(MemRegion mr,
                                         FilterOutOfRegionClosure* cl,
                                         HeapRegion* hr,
                                         G1CollectedHeap* g1h) {
  assert(hr->is_humongous(), "precondition");
  HeapRegion* sr = hr->humongous_start_region();
  oop obj = oop(sr->bottom());

  // If concurrent and klass_or_null is NULL, then space has been
  // allocated but the object has not yet been published by setting
  // the klass. That can only happen if the card is stale. However,
  // we've already set the card clean, so we must return failure,
  // since the allocating thread could have performed a write to the
  // card that might be missed otherwise.
  if (!g1h->is_gc_active() && (obj->klass_or_null_acquire() == NULL)) {
    return false;
  }
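  // (Note: klass_or_null_acquire is assumed to pair with the release-store
  // that publishes the klass when the object is allocated, so observing a
  // non-NULL klass here implies the object's header is fully visible.)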

  // Only filler objects follow a humongous object in the containing
  // regions, and we can ignore those. So only process the one
  // humongous object.
  if (!g1h->is_obj_dead(obj, sr)) {
    if (obj->is_objArray() || (sr->bottom() < mr.start())) {
      // objArrays are always marked precisely, so limit processing
      // with mr. Non-objArrays might be precisely marked, and since
      // it's humongous it's worthwhile avoiding full processing.
      // However, the card could be stale and only cover filler
      // objects. That should be rare, so not worth checking for;
      // instead let it fall out from the bounded iteration.
      obj->oop_iterate(cl, mr);
    } else {
      // If obj is not an objArray and mr contains the start of the
      // obj, then this could be an imprecise mark, and we need to
      // process the entire object.
      obj->oop_iterate(cl);
    }
  }
  return true;
}

bool HeapRegion::oops_on_card_seq_iterate_careful(MemRegion mr,
                                                  FilterOutOfRegionClosure* cl,
                                                  jbyte* card_ptr) {
  assert(card_ptr != NULL, "pre-condition");
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // If we're within a stop-world GC, then we might look at a card in a
  // GC alloc region that extends onto a GC LAB, which may not be
  // parseable. Stop such at the "scan_top" of the region.
  if (g1h->is_gc_active()) {
    mr = mr.intersection(MemRegion(bottom(), scan_top()));
  } else {
    mr = mr.intersection(used_region());
  }
  if (mr.is_empty()) {
    return true;
  }

  // The intersection of the incoming mr (for the card) and the
  // allocated part of the region is non-empty. This implies that
  // we have actually allocated into this region. The code in
  // G1CollectedHeap.cpp that allocates a new region sets the
  // is_young tag on the region before allocating. Thus we
  // safely know if this region is young.
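  // A dirty card in a young region can be ignored entirely: young regions
  // are always placed in the next collection set and scanned in full during
  // evacuation, so any reference this card covers will be found then anyway.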
  if (is_young()) {
    return true;
  }

  // We can only clean the card here, after we make the decision that
  // the card is not young.
  *card_ptr = CardTableModRefBS::clean_card_val();
  // We must complete this write before we do any of the reads below.
  OrderAccess::storeload();
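  // Sketch of the race the fence guards against: without it, the heap reads
  // below could be satisfied before our clean_card store becomes globally
  // visible. A mutator could install a new reference and redirty the card in
  // that window, and our delayed clean would then erase its dirty mark, so
  // the new reference would never be rescanned.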

  // Special handling for humongous regions.
  if (is_humongous()) {
    return do_oops_on_card_in_humongous(mr, cl, this, g1h);
  }

  // During GC we limit mr by scan_top. So we never get here with an
  // mr covering objects allocated during GC. Non-humongous objects
  // are only allocated in the old-gen during GC. So the parts of the
  // heap that may be examined here are always parsable; there's no
  // need to use klass_or_null here to detect in-progress allocations.

  // Cache the boundaries of the memory region in some const locals
  HeapWord* const start = mr.start();
  HeapWord* const end = mr.end();

  // Find the obj that extends onto mr.start().
  // Update BOT as needed while finding start of (possibly dead)
  // object containing the start of the region.
  HeapWord* cur = block_start(start);

#ifdef ASSERT
  {
    assert(cur <= start,
           "cur: " PTR_FORMAT ", start: " PTR_FORMAT, p2i(cur), p2i(start));
    HeapWord* next = cur + block_size(cur);
    assert(start < next,
           "start: " PTR_FORMAT ", next: " PTR_FORMAT, p2i(start), p2i(next));
  }
#endif

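  // Walk forward from the first block containing start, visiting each object
  // that intersects mr; dead objects are stepped over using block_size().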
  do {
    oop obj = oop(cur);
    assert(obj->is_oop(true), "Not an oop at " PTR_FORMAT, p2i(cur));
    assert(obj->klass_or_null() != NULL,
           "Unparsable heap at " PTR_FORMAT, p2i(cur));

    if (g1h->is_obj_dead(obj, this)) {
      // Carefully step over dead object.
      cur += block_size(cur);
    } else {
      // Step over live object, and process its references.
      cur += obj->size();
      // Non-objArrays are usually marked imprecise at the object
      // start, in which case we need to iterate over them in full.
      // objArrays are precisely marked, but can still be iterated
      // over in full if completely covered.
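      // For example, when mr covers only the middle of a large objArray
      // spanning many cards, oop_iterate(cl, mr) visits just the elements
      // inside mr rather than re-scanning the whole array for every card.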
      if (!obj->is_objArray() || (((HeapWord*)obj) >= start && cur <= end)) {
        obj->oop_iterate(cl);
      } else {
        obj->oop_iterate(cl, mr);
      }
    }
  } while (cur < end);

  return true;
}
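
// A hedged sketch of the expected use from the refinement path (the actual
// caller lives in G1RemSet; this fragment is illustrative only):
//
//   if (!hr->oops_on_card_seq_iterate_careful(dirty_region, &filter_cl,
//                                             card_ptr)) {
//     // Leave the card dirty so it is reprocessed later.
//     *card_ptr = CardTableModRefBS::dirty_card_val();
//   }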

// Code roots support

void HeapRegion::add_strong_code_root(nmethod* nm) {
  HeapRegionRemSet* hrrs = rem_set();
  hrrs->add_strong_code_root(nm);
}

void HeapRegion::add_strong_code_root_locked(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);