src/share/vm/gc_implementation/g1/heapRegion.cpp

Old version:

 383 CompactibleSpace* HeapRegion::next_compaction_space() const {
 384   // We're not using an iterator given that it will wrap around when
 385   // it reaches the last region and this is not what we want here.
 386   G1CollectedHeap* g1h = G1CollectedHeap::heap();
 387   uint index = hrs_index() + 1;
 388   while (index < g1h->n_regions()) {
 389     HeapRegion* hr = g1h->region_at(index);
 390     if (!hr->isHumongous()) {
 391       return hr;
 392     }
 393     index += 1;
 394   }
 395   return NULL;
 396 }
 397 
 398 void HeapRegion::note_self_forwarding_removal_start(bool during_initial_mark,
 399                                                     bool during_conc_mark) {
 400   // We always recreate the prev marking info and we'll explicitly
 401   // mark all objects we find to be self-forwarded on the prev
 402   // bitmap. So all objects need to be below PTAMS.
 403   _prev_top_at_mark_start = top();
 404   _prev_marked_bytes = 0;
 405 
 406   if (during_initial_mark) {
 407     // During initial-mark, we'll also explicitly mark all objects
 408     // we find to be self-forwarded on the next bitmap. So all
 409     // objects need to be below NTAMS.
 410     _next_top_at_mark_start = top();
 411     _next_marked_bytes = 0;
 412   } else if (during_conc_mark) {
 413     // During concurrent mark, all objects in the CSet (including
 414     // the ones we find to be self-forwarded) are implicitly live.
 415     // So all objects need to be above NTAMS.
 416     _next_top_at_mark_start = bottom();
 417     _next_marked_bytes = 0;
 418   }
 419 }
 420 
 421 void HeapRegion::note_self_forwarding_removal_end(bool during_initial_mark,
 422                                                   bool during_conc_mark,
 423                                                   size_t marked_bytes) {
 424   assert(0 <= marked_bytes && marked_bytes <= used(),
 425          err_msg("marked: "SIZE_FORMAT" used: "SIZE_FORMAT,
 426                  marked_bytes, used()));
 427   _prev_marked_bytes = marked_bytes;
 428 }
 429 
 430 HeapWord*
 431 HeapRegion::object_iterate_mem_careful(MemRegion mr,
 432                                        ObjectClosure* cl) {
 433   G1CollectedHeap* g1h = G1CollectedHeap::heap();
 434   // We used to use "block_start_careful" here.  But we're actually happy
 435   // to update the BOT while we do this...
 436   HeapWord* cur = block_start(mr.start());
 437   mr = mr.intersection(used_region());
 438   if (mr.is_empty()) return NULL;
 439   // Otherwise, find the obj that extends onto mr.start().
 440 
 441   assert(cur <= mr.start()
 442          && (oop(cur)->klass_or_null() == NULL ||
 443              cur + oop(cur)->size() > mr.start()),
 444          "postcondition of block_start");
 445   oop obj;
 446   while (cur < mr.end()) {


 888 };
 889 
 890 // This really ought to be commoned up into OffsetTableContigSpace somehow.
 891 // We would need a mechanism to make that code skip dead objects.
 892 
 893 void HeapRegion::verify(VerifyOption vo,
 894                         bool* failures) const {
 895   G1CollectedHeap* g1 = G1CollectedHeap::heap();
 896   *failures = false;
 897   HeapWord* p = bottom();
 898   HeapWord* prev_p = NULL;
 899   VerifyLiveClosure vl_cl(g1, vo);
 900   bool is_humongous = isHumongous();
 901   bool do_bot_verify = !is_young();
 902   size_t object_num = 0;
 903   while (p < top()) {
 904     oop obj = oop(p);
 905     size_t obj_size = block_size(p);
 906     object_num += 1;
 907 
 908     if (is_humongous != g1->isHumongous(obj_size)) {
 909       gclog_or_tty->print_cr("obj "PTR_FORMAT" is of %shumongous size ("
 910                              SIZE_FORMAT" words) in a %shumongous region",
 911                              p, g1->isHumongous(obj_size) ? "" : "non-",
 912                              obj_size, is_humongous ? "" : "non-");
 913       *failures = true;
 914       return;
 915     }
 916 
 917     // If it returns false, verify_for_object() will output the
 918     // appropriate message.
 919     if (do_bot_verify && !_offsets.verify_for_object(p, obj_size)) {
 920       *failures = true;
 921       return;
 922     }
 923 
 924     if (!g1->is_obj_dead_cond(obj, this, vo)) {
 925       if (obj->is_oop()) {
 926         Klass* klass = obj->klass();
 927         if (!klass->is_metaspace_object()) {
 928           gclog_or_tty->print_cr("klass "PTR_FORMAT" of object "PTR_FORMAT" "
 929                                  "not metadata", klass, (void *)obj);
 930           *failures = true;
 931           return;
 932         } else if (!klass->is_klass()) {
 933           gclog_or_tty->print_cr("klass "PTR_FORMAT" of object "PTR_FORMAT" "
 934                                  "not a klass", klass, (void *)obj);
 935           *failures = true;
 936           return;
 937         } else {
 938           vl_cl.set_containing_obj(obj);
 939           obj->oop_iterate_no_header(&vl_cl);
 940           if (vl_cl.failures()) {
 941             *failures = true;
 942           }
 943           if (G1MaxVerifyFailures >= 0 &&
 944               vl_cl.n_failures() >= G1MaxVerifyFailures) {
 945             return;
 946           }
 947         }

New version:

 383 CompactibleSpace* HeapRegion::next_compaction_space() const {
 384   // We're not using an iterator given that it will wrap around when
 385   // it reaches the last region and this is not what we want here.
 386   G1CollectedHeap* g1h = G1CollectedHeap::heap();
 387   uint index = hrs_index() + 1;
 388   while (index < g1h->n_regions()) {
 389     HeapRegion* hr = g1h->region_at(index);
 390     if (!hr->isHumongous()) {
 391       return hr;
 392     }
 393     index += 1;
 394   }
 395   return NULL;
 396 }
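
For context: next_compaction_space() chains the non-humongous regions together for full-GC compaction. A hedged sketch of how such a chain can be walked (the loop and the first_space starting point are illustrative, not code from this file):

    // Illustrative only: humongous regions are skipped, and the chain
    // ends at the last region rather than wrapping around -- which is
    // why the code above uses a plain index instead of an iterator.
    for (CompactibleSpace* space = first_space;  // first_space: assumed start
         space != NULL;
         space = space->next_compaction_space()) {
      // ... compact live objects into `space` ...
    }
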
 397 
 398 void HeapRegion::note_self_forwarding_removal_start(bool during_initial_mark,
 399                                                     bool during_conc_mark) {
 400   // We always recreate the prev marking info and we'll explicitly
 401   // mark all objects we find to be self-forwarded on the prev
 402   // bitmap. So all objects need to be below PTAMS.
 403   _prev_marked_bytes = 0;
 404 
 405   if (during_initial_mark) {
 406     // During initial-mark, we'll also explicitly mark all objects
 407     // we find to be self-forwarded on the next bitmap. So all
 408     // objects need to be below NTAMS.
 409     _next_top_at_mark_start = top();
 410     _next_marked_bytes = 0;
 411   } else if (during_conc_mark) {
 412     // During concurrent mark, all objects in the CSet (including
 413     // the ones we find to be self-forwarded) are implicitly live.
 414     // So all objects need to be above NTAMS.
 415     _next_top_at_mark_start = bottom();
 416     _next_marked_bytes = 0;
 417   }
 418 }
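
For context: the two *_top_at_mark_start fields set above drive G1's liveness queries. An object at or above TAMS was allocated after marking began and is treated as implicitly live without a bitmap mark, so setting TAMS to top() forces every current object to be explicitly marked, while setting NTAMS to bottom() makes them all implicitly live. A minimal sketch of the resulting predicates (the names mirror HeapRegion's accessors, shown here purely as an illustration):

    // Sketch only: liveness relative to the TAMS values set above.
    bool allocated_since_prev_marking(HeapWord* p) const {
      return p >= _prev_top_at_mark_start;  // implicitly live on prev bitmap
    }
    bool allocated_since_next_marking(HeapWord* p) const {
      return p >= _next_top_at_mark_start;  // implicitly live on next bitmap
    }
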
 419 
 420 void HeapRegion::note_self_forwarding_removal_end(bool during_initial_mark,
 421                                                   bool during_conc_mark,
 422                                                   size_t marked_bytes) {
 423   assert(0 <= marked_bytes && marked_bytes <= used(),
 424          err_msg("marked: "SIZE_FORMAT" used: "SIZE_FORMAT,
 425                  marked_bytes, used()));
 426   _prev_top_at_mark_start = top();
 427   _prev_marked_bytes = marked_bytes;
 428 }
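
These two hooks bracket the removal of self-forwarding pointers after an evacuation failure; note that with this change PTAMS is only published in the _end hook, once marked_bytes is known. A hypothetical call sequence (caller and variable names are illustrative, not taken from this change):

    // Illustrative caller, for orientation only.
    hr->note_self_forwarding_removal_start(during_im, during_cm);
    // ... walk hr, un-forward each self-forwarded object, mark it on the
    // appropriate bitmap(s), and accumulate its size in live_bytes ...
    hr->note_self_forwarding_removal_end(during_im, during_cm, live_bytes);
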
 429 
 430 HeapWord*
 431 HeapRegion::object_iterate_mem_careful(MemRegion mr,
 432                                        ObjectClosure* cl) {
 433   G1CollectedHeap* g1h = G1CollectedHeap::heap();
 434   // We used to use "block_start_careful" here.  But we're actually happy
 435   // to update the BOT while we do this...
 436   HeapWord* cur = block_start(mr.start());
 437   mr = mr.intersection(used_region());
 438   if (mr.is_empty()) return NULL;
 439   // Otherwise, find the obj that extends onto mr.start().
 440 
 441   assert(cur <= mr.start()
 442          && (oop(cur)->klass_or_null() == NULL ||
 443              cur + oop(cur)->size() > mr.start()),
 444          "postcondition of block_start");
 445   oop obj;
 446   while (cur < mr.end()) {
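
The assert above states block_start()'s postcondition: the returned address is at or before mr.start(), and the object there either extends onto mr.start() or has no klass installed yet (klass_or_null() == NULL) because it is still being allocated concurrently. Restated as a hedged sketch (addr is illustrative):

    // Sketch of the postcondition only, not code from this file.
    HeapWord* b = block_start(addr);
    assert(b <= addr, "block starts at or before addr");
    assert(oop(b)->klass_or_null() == NULL ||  // object still being set up
           b + oop(b)->size() > addr,          // or it extends onto addr
           "object at b must reach addr once initialized");
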


 888 };
 889 
 890 // This really ought to be commoned up into OffsetTableContigSpace somehow.
 891 // We would need a mechanism to make that code skip dead objects.
 892 
 893 void HeapRegion::verify(VerifyOption vo,
 894                         bool* failures) const {
 895   G1CollectedHeap* g1 = G1CollectedHeap::heap();
 896   *failures = false;
 897   HeapWord* p = bottom();
 898   HeapWord* prev_p = NULL;
 899   VerifyLiveClosure vl_cl(g1, vo);
 900   bool is_humongous = isHumongous();
 901   bool do_bot_verify = !is_young();
 902   size_t object_num = 0;
 903   while (p < top()) {
 904     oop obj = oop(p);
 905     size_t obj_size = block_size(p);
 906     object_num += 1;
 907 
 908     if (is_humongous != g1->isHumongous(obj_size) &&
 909         !g1->is_obj_dead(obj, this)) { // Dead objects may have bigger block_size since they span several objects.
 910       gclog_or_tty->print_cr("obj "PTR_FORMAT" is of %shumongous size ("
 911                              SIZE_FORMAT" words) in a %shumongous region",
 912                              p, g1->isHumongous(obj_size) ? "" : "non-",
 913                              obj_size, is_humongous ? "" : "non-");
 914       *failures = true;
 915       return;
 916     }
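
For context on the new is_obj_dead() filter: G1 can coalesce a run of dead objects so that block_size(p) covers the whole run, e.g. | live A | dead B | dead C | live D | where block_size(B) == size(B) + size(C). Such a run can exceed the humongous threshold inside an ordinary region, so the size-class check (and the BOT check below) must only fire for live objects.
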
 917 
 918     // If it returns false, verify_for_object() will output the
 919     // appropriate message.
 920     if (do_bot_verify &&
 921         !g1->is_obj_dead(obj, this) &&
 922         !_offsets.verify_for_object(p, obj_size)) {
 923       *failures = true;
 924       return;
 925     }
 926 
 927     if (!g1->is_obj_dead_cond(obj, this, vo)) {
 928       if (obj->is_oop()) {
 929         Klass* klass = obj->klass();
 930         bool is_metaspace_object = Metaspace::contains(klass) ||
 931                                    (vo == VerifyOption_G1UsePrevMarking &&
 932                                    ClassLoaderDataGraph::unload_list_contains(klass));
 933         if (!is_metaspace_object) {
 934           gclog_or_tty->print_cr("klass "PTR_FORMAT" of object "PTR_FORMAT" "
 935                                  "not metadata", klass, (void *)obj);
 936           *failures = true;
 937           return;
 938         } else if (!klass->is_klass()) {
 939           gclog_or_tty->print_cr("klass "PTR_FORMAT" of object "PTR_FORMAT" "
 940                                  "not a klass", klass, (void *)obj);
 941           *failures = true;
 942           return;
 943         } else {
 944           vl_cl.set_containing_obj(obj);
 945           obj->oop_iterate_no_header(&vl_cl);
 946           if (vl_cl.failures()) {
 947             *failures = true;
 948           }
 949           if (G1MaxVerifyFailures >= 0 &&
 950               vl_cl.n_failures() >= G1MaxVerifyFailures) {
 951             return;
 952           }
 953         }
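
For context on the widened metaspace check: when verifying against the previous marking (VerifyOption_G1UsePrevMarking), a dead object's klass may already have been moved to the class-unloading list rather than living in metaspace, so Metaspace::contains() alone would raise a false failure. A hypothetical extraction of the predicate (the helper name is illustrative only):

    // Hypothetical helper mirroring the inline check above.
    static bool klass_is_valid_metadata(Klass* k, VerifyOption vo) {
      return Metaspace::contains(k) ||
             (vo == VerifyOption_G1UsePrevMarking &&
              ClassLoaderDataGraph::unload_list_contains(k));
    }
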