 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1OopClosures.inline.hpp"
#include "gc_implementation/g1/heapRegion.inline.hpp"
#include "gc_implementation/g1/heapRegionRemSet.hpp"
#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
#include "memory/genOopClosures.inline.hpp"
#include "memory/iterator.hpp"
#include "oops/oop.inline.hpp"

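// Region sizing globals. Each is computed exactly once during heap
// setup; the guarantees in the sizing code below enforce that.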
int HeapRegion::LogOfHRGrainBytes = 0;
int HeapRegion::LogOfHRGrainWords = 0;
size_t HeapRegion::GrainBytes = 0;
size_t HeapRegion::GrainWords = 0;
size_t HeapRegion::CardsPerRegion = 0;

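// HeapRegionDCTOC is G1's DirtyCardToOopClosure: it applies the given
// OopClosure to the objects on a dirty card within this region, with
// the FilterKind selecting which references are filtered out along the
// way.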
HeapRegionDCTOC::HeapRegionDCTOC(G1CollectedHeap* g1,
                                 HeapRegion* hr, OopClosure* cl,
                                 CardTableModRefBS::PrecisionStyle precision,
                                 FilterKind fk) :
  ContiguousSpaceDCTOC(hr, cl, precision, NULL),
  _hr(hr), _fk(fk), _g1(g1)
{ }

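// FilterOutOfRegionClosure forwards to the wrapped closure only those
// references that point outside [_r_bottom, _r_end), counting them in
// _out_of_region.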
FilterOutOfRegionClosure::FilterOutOfRegionClosure(HeapRegion* r,
                                                   OopClosure* oc) :
  _r_bottom(r->bottom()), _r_end(r->end()),
  _oc(oc), _out_of_region(0)
{}

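// Closure used during heap verification: checks the reference fields of
// _containing_obj against the card table and the collector's liveness
// information, reporting fields that point at dead objects.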
class VerifyLiveClosure: public OopClosure {
private:
  G1CollectedHeap* _g1h;
  CardTableModRefBS* _bs;
  oop _containing_obj;

// ...

  // Now make sure that we don't go over or under our limits.
  if (region_size < MIN_REGION_SIZE) {
    region_size = MIN_REGION_SIZE;
  } else if (region_size > MAX_REGION_SIZE) {
    region_size = MAX_REGION_SIZE;
  }

  // And recalculate the log.
  region_size_log = log2_long((jlong) region_size);

  // Now, set up the globals.
  guarantee(LogOfHRGrainBytes == 0, "we should only set it once");
  LogOfHRGrainBytes = region_size_log;

  guarantee(LogOfHRGrainWords == 0, "we should only set it once");
  LogOfHRGrainWords = LogOfHRGrainBytes - LogHeapWordSize;

  guarantee(GrainBytes == 0, "we should only set it once");
  // The cast to size_t is safe, given that we've bounded region_size by
  // MIN_REGION_SIZE and MAX_REGION_SIZE.
  GrainBytes = (size_t) region_size;

  guarantee(GrainWords == 0, "we should only set it once");
  GrainWords = GrainBytes >> LogHeapWordSize;
  guarantee((size_t)(1 << LogOfHRGrainWords) == GrainWords, "sanity");

  guarantee(CardsPerRegion == 0, "we should only set it once");
  CardsPerRegion = GrainBytes >> CardTableModRefBS::card_shift;
}
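
// For illustration only (values assumed, not taken from this file): on a
// 64-bit VM (LogHeapWordSize == 3) with 512-byte cards (card_shift == 9),
// a clamped region_size of 2M gives region_size_log == 21, and therefore
// GrainBytes == 2M, GrainWords == 256K, LogOfHRGrainWords == 18, and
// CardsPerRegion == 4096.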

void HeapRegion::reset_after_compaction() {
  G1OffsetTableContigSpace::reset_after_compaction();
  // After a compaction the mark bitmap is invalid, so we must
  // treat all objects as being inside the unmarked area.
  zero_marked_bytes();
  init_top_at_mark_start();
}

DirtyCardToOopClosure*
HeapRegion::new_dcto_closure(OopClosure* cl,
                             CardTableModRefBS::PrecisionStyle precision,
                             HeapRegionDCTOC::FilterKind fk) {
  return new HeapRegionDCTOC(G1CollectedHeap::heap(),
                             this, cl, precision, fk);
}

// ...
  uninstall_surv_rate_group();
  set_young_type(NotYoung);
  reset_pre_dummy_top();

  if (!par) {
    // If this is parallel, this will be done later.
    HeapRegionRemSet* hrrs = rem_set();
    if (hrrs != NULL) hrrs->clear();
    _claimed = InitialClaimValue;
  }
  zero_marked_bytes();
  set_sort_index(-1);

  _offsets.resize(HeapRegion::GrainWords);
  init_top_at_mark_start();
  if (clear_space) clear(SpaceDecorator::Mangle);
}

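// Completes, for one region, the clearing work that the parallel path
// above defers ("this will be done later"): the remembered set and the
// region's card table entries are cleared here instead.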
void HeapRegion::par_clear() {
  assert(used() == 0, "the region should have been already cleared");
  assert(capacity() == HeapRegion::GrainBytes, "should be back to normal");
  HeapRegionRemSet* hrrs = rem_set();
  hrrs->clear();
  CardTableModRefBS* ct_bs =
    (CardTableModRefBS*)G1CollectedHeap::heap()->barrier_set();
  ct_bs->clear(MemRegion(bottom(), end()));
}

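// GC efficiency is the ratio of reclaimable garbage bytes to the
// predicted (non-young) collection time for this region; G1 uses it to
// rank candidate regions when building a collection set.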
418 // <PREDICTION>
419 void HeapRegion::calc_gc_efficiency() {
420 G1CollectedHeap* g1h = G1CollectedHeap::heap();
421 _gc_efficiency = (double) garbage_bytes() /
422 g1h->predict_region_elapsed_time_ms(this, false);
423 }
424 // </PREDICTION>
425
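// A humongous object occupies a "starts humongous" region followed by
// zero or more "continues humongous" regions; _humongous_type records
// which role this region plays and _humongous_start_region where the
// sequence begins.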
void HeapRegion::set_startsHumongous(HeapWord* new_top, HeapWord* new_end) {
  assert(!isHumongous(), "sanity / pre-condition");
  assert(end() == _orig_end,
         "Should be normal before the humongous object allocation");
  assert(top() == bottom(), "should be empty");

// ...

  _humongous_type = ContinuesHumongous;
  _humongous_start_region = first_hr;
}

void HeapRegion::set_notHumongous() {
  assert(isHumongous(), "pre-condition");

  if (startsHumongous()) {
    assert(top() <= end(), "pre-condition");
    set_end(_orig_end);
    if (top() > end()) {
      // at least one "continues humongous" region after it
      set_top(end());
    }
  } else {
    // continues humongous
    assert(end() == _orig_end, "sanity");
  }

  assert(capacity() == HeapRegion::GrainBytes, "pre-condition");
  _humongous_type = NotHumongous;
  _humongous_start_region = NULL;
}

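// Attempts to claim this region by atomically swinging _claimed from
// its current value to claimValue. At most one thread can succeed for a
// given value; note that a failed CAS is not retried, the caller simply
// loses the claim.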
bool HeapRegion::claimHeapRegion(jint claimValue) {
  jint current = _claimed;
  if (current != claimValue) {
    jint res = Atomic::cmpxchg(claimValue, &_claimed, current);
    if (res == current) {
      return true;
    }
  }
  return false;
}

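// Finds the next block start at or after addr by narrowing the
// candidate interval [low, high) binary-search style, halving the
// remaining pointer_delta on each iteration.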
HeapWord* HeapRegion::next_block_start_careful(HeapWord* addr) {
  HeapWord* low = addr;
  HeapWord* high = end();
  while (low < high) {
    size_t diff = pointer_delta(high, low);
    // ...