src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
Print this page
rev 4801 : imported patch code-movement
rev 4802 : imported patch optimize-nmethod-scanning
rev 4803 : imported patch thomas-comments
*** 3347,3357 ****
"Expected to be executed serially by the VM thread at this point");
if (!silent) { gclog_or_tty->print("Roots "); }
VerifyRootsClosure rootsCl(vo);
G1VerifyCodeRootOopClosure codeRootsCl(this, &rootsCl, vo);
! G1VerifyCodeRootBlobClosure blobsCl(&codeRootsCl, /*do_marking=*/ false);
VerifyKlassClosure klassCl(this, &rootsCl);
// We apply the relevant closures to all the oops in the
// system dictionary, the string table and the code cache.
const int so = SO_AllClasses | SO_Strings | SO_CodeCache;
--- 3347,3357 ----
"Expected to be executed serially by the VM thread at this point");
if (!silent) { gclog_or_tty->print("Roots "); }
VerifyRootsClosure rootsCl(vo);
G1VerifyCodeRootOopClosure codeRootsCl(this, &rootsCl, vo);
! G1VerifyCodeRootBlobClosure blobsCl(&codeRootsCl, false /* do_marking */);
VerifyKlassClosure klassCl(this, &rootsCl);
// We apply the relevant closures to all the oops in the
// system dictionary, the string table and the code cache.
const int so = SO_AllClasses | SO_Strings | SO_CodeCache;
*** 3875,3887 ****
// get entries from the secondary_free_list.
if (!G1StressConcRegionFreeing) {
append_secondary_free_list_if_not_empty_with_lock();
}
! assert(check_young_list_well_formed(),
! "young list should be well formed");
!
assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue),
"sanity check");
// Don't dynamically change the number of GC threads this early. A value of
// 0 is used to indicate serial work. When parallel work is done,
--- 3875,3885 ----
// get entries from the secondary_free_list.
if (!G1StressConcRegionFreeing) {
append_secondary_free_list_if_not_empty_with_lock();
}
! assert(check_young_list_well_formed(), "young list should be well formed");
assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue),
"sanity check");
// Don't dynamically change the number of GC threads this early. A value of
// 0 is used to indicate serial work. When parallel work is done,
*** 4983,4995 ****
scan_klasses_cl = &scan_mark_klasses_cl_s;
}
G1ParPushHeapRSClosure push_heap_rs_cl(_g1h, &pss);
! // Don't scan the code cache as part of strong root scanning. The code
! // roots that point into a region are scanned when we scan the RSet
! // of that region.
int so = SharedHeap::SO_AllClasses | SharedHeap::SO_Strings;
pss.start_strong_roots();
_g1h->g1_process_strong_roots(/* is scavenging */ true,
SharedHeap::ScanningOption(so),
--- 4981,4994 ----
scan_klasses_cl = &scan_mark_klasses_cl_s;
}
G1ParPushHeapRSClosure push_heap_rs_cl(_g1h, &pss);
! // Don't scan the scavengable methods in the code cache as part
! // of strong root scanning. The code roots that point into a
! // region in the collection set are scanned when we scan the
! // region's RSet.
int so = SharedHeap::SO_AllClasses | SharedHeap::SO_Strings;
pss.start_strong_roots();
_g1h->g1_process_strong_roots(/* is scavenging */ true,
SharedHeap::ScanningOption(so),
*** 5097,5107 ****
// If this is an initial mark pause, and we're not scanning
// the entire code cache, we need to mark the oops in the
// strong code root lists for the regions that are not in
// the collection set.
! // Note all threads partipate in this set of root tasks.
double mark_strong_code_roots_ms = 0.0;
if (g1_policy()->during_initial_mark_pause() && !(so & SO_CodeCache)) {
double mark_strong_roots_start = os::elapsedTime();
mark_strong_code_roots(worker_i);
mark_strong_code_roots_ms = (os::elapsedTime() - mark_strong_roots_start) * 1000.0;
--- 5096,5106 ----
// If this is an initial mark pause, and we're not scanning
// the entire code cache, we need to mark the oops in the
// strong code root lists for the regions that are not in
// the collection set.
! // Note all threads participate in this set of root tasks.
double mark_strong_code_roots_ms = 0.0;
if (g1_policy()->during_initial_mark_pause() && !(so & SO_CodeCache)) {
double mark_strong_roots_start = os::elapsedTime();
mark_strong_code_roots(worker_i);
mark_strong_code_roots_ms = (os::elapsedTime() - mark_strong_roots_start) * 1000.0;
*** 6549,6565 ****
nmethod* _nm;
template <class T> void do_oop_work(T* p) {
T heap_oop = oopDesc::load_heap_oop(p);
if (!oopDesc::is_null(heap_oop)) {
! HeapRegion* hr = _g1h->heap_region_containing(heap_oop);
! assert(!hr->isHumongous(), "nmethod oop in numongous?");
! // Note this may push duplicates but that is OK since
! // when we scan the nmethods during GC we "mark" them
! // as visited.
! hr->push_strong_code_root(_nm);
assert(hr->strong_code_root_list()->contains(_nm), "push failed?");
}
}
public:
--- 6548,6565 ----
nmethod* _nm;
template <class T> void do_oop_work(T* p) {
T heap_oop = oopDesc::load_heap_oop(p);
if (!oopDesc::is_null(heap_oop)) {
! oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
! HeapRegion* hr = _g1h->heap_region_containing(obj);
! assert(!hr->isHumongous(), "code root in humongous region?");
! // HeapRegion::add_strong_code_root() avoids adding duplicate
! // entries but having duplicates is OK since we "mark" nmethods
! // as visited when we scan the strong code root lists during the GC.
! hr->add_strong_code_root(_nm);
assert(hr->strong_code_root_list()->contains(_nm), "push failed?");
}
}
public:
*** 6575,6586 ****
nmethod* _nm;
template <class T> void do_oop_work(T* p) {
T heap_oop = oopDesc::load_heap_oop(p);
if (!oopDesc::is_null(heap_oop)) {
! HeapRegion* hr = _g1h->heap_region_containing(heap_oop);
! assert(!hr->isHumongous(), "nmethod oop in numongous?");
hr->remove_strong_code_root(_nm);
assert(!hr->strong_code_root_list()->contains(_nm), "remove failed?");
}
}
--- 6575,6587 ----
nmethod* _nm;
template <class T> void do_oop_work(T* p) {
T heap_oop = oopDesc::load_heap_oop(p);
if (!oopDesc::is_null(heap_oop)) {
! oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
! HeapRegion* hr = _g1h->heap_region_containing(obj);
! assert(!hr->isHumongous(), "code root in humongous region?");
hr->remove_strong_code_root(_nm);
assert(!hr->strong_code_root_list()->contains(_nm), "remove failed?");
}
}
*** 6591,6611 ****
void do_oop(oop* p) { do_oop_work(p); }
void do_oop(narrowOop* p) { do_oop_work(p); }
};
void G1CollectedHeap::register_nmethod(nmethod* nm) {
! assert(nm != NULL, "sanity");
! if (nm == NULL) return;
!
RegisterNMethodOopClosure reg_cl(this, nm);
nm->oops_do(&reg_cl);
}
void G1CollectedHeap::unregister_nmethod(nmethod* nm) {
! assert(nm != NULL, "sanity");
! if (nm == NULL) return;
!
UnregisterNMethodOopClosure reg_cl(this, nm);
nm->oops_do(&reg_cl);
}
class MigrateCodeRootsHeapRegionClosure: public HeapRegionClosure {
--- 6592,6608 ----
void do_oop(oop* p) { do_oop_work(p); }
void do_oop(narrowOop* p) { do_oop_work(p); }
};
void G1CollectedHeap::register_nmethod(nmethod* nm) {
! guarantee(nm != NULL, "sanity");
RegisterNMethodOopClosure reg_cl(this, nm);
nm->oops_do(&reg_cl);
}
void G1CollectedHeap::unregister_nmethod(nmethod* nm) {
! guarantee(nm != NULL, "sanity");
UnregisterNMethodOopClosure reg_cl(this, nm);
nm->oops_do(®_cl);
}
class MigrateCodeRootsHeapRegionClosure: public HeapRegionClosure {
*** 6623,6644 ****
collection_set_iterate(&cl);
double migration_time_ms = (os::elapsedTime() - migrate_start) * 1000.0;
g1_policy()->phase_times()->record_strong_code_root_migration_time(migration_time_ms);
}
class MarkStrongCodeRootCodeBlobClosure: public CodeBlobClosure {
- // Note when we're marking the oops in the strong code roots lists
- // for regions not in the collection set, we do not want to use a
- // "marking" CodeBlobToOopClosure. We don't want to get in the following
- // situation if marking sees an nmethod whose oops span a couple
- // of regions (one in the collection set and the other not).
- //
- // A "marking" CodeBlobToOopClosure would end up "marking" the nmethod
- // by walking the code root list for region not in the collection set.
- // When we come to scan the code root list for the region in the
- // collection, we would skip the nmethod because it's already been
- // "marked" - potentially missing some roots.
class MarkStrongCodeRootOopClosure: public OopClosure {
ConcurrentMark* _cm;
HeapRegion* _hr;
uint _worker_id;
--- 6620,6643 ----
collection_set_iterate(&cl);
double migration_time_ms = (os::elapsedTime() - migrate_start) * 1000.0;
g1_policy()->phase_times()->record_strong_code_root_migration_time(migration_time_ms);
}
+ // Mark all the code roots that point into regions *not* in the
+ // collection set.
+ //
+ // Note we do not want to use a "marking" CodeBlobToOopClosure while
+ // walking the code roots lists of regions not in the collection
+ // set. Suppose we have an nmethod (M) that points to objects in two
+ // separate regions - one in the collection set (R1) and one not (R2).
+ // Using a "marking" CodeBlobToOopClosure here would result in "marking"
+ // nmethod M when walking the code roots for R1. When we come to scan
+ // the code roots for R2, we would see that M is already marked and it
+ // would be skipped and the objects in R2 that are referenced from M
+ // would not be evacuated.
+
class MarkStrongCodeRootCodeBlobClosure: public CodeBlobClosure {
class MarkStrongCodeRootOopClosure: public OopClosure {
ConcurrentMark* _cm;
HeapRegion* _hr;
uint _worker_id;
*** 6655,6665 ****
}
}
public:
MarkStrongCodeRootOopClosure(ConcurrentMark* cm, HeapRegion* hr, uint worker_id) :
! _cm(cm), _hr(hr), _worker_id(worker_id) {}
void do_oop(narrowOop* p) { do_oop_work(p); }
void do_oop(oop* p) { do_oop_work(p); }
};
--- 6654,6666 ----
}
}
public:
MarkStrongCodeRootOopClosure(ConcurrentMark* cm, HeapRegion* hr, uint worker_id) :
! _cm(cm), _hr(hr), _worker_id(worker_id) {
! assert(!_hr->in_collection_set(), "sanity");
! }
void do_oop(narrowOop* p) { do_oop_work(p); }
void do_oop(oop* p) { do_oop_work(p); }
};