src/share/vm/memory/space.cpp

rev 6796 : [mq]: templateOopIterate

--- old/src/share/vm/memory/space.cpp

  79 void DirtyCardToOopClosure::walk_mem_region(MemRegion mr,
  80                                             HeapWord* bottom,
  81                                             HeapWord* top) {
  82   // 1. Blocks may or may not be objects.
  83   // 2. Even when a block_is_obj(), it may not entirely
  84   //    occupy the block if the block quantum is larger than
  85   //    the object size.
  86   // We can and should try to optimize by calling the non-MemRegion
  87   // version of oop_iterate() for all but the extremal objects
  88   // (for which we need to call the MemRegion version of
  89   // oop_iterate()). To be done post-beta. XXX
  90   for (; bottom < top; bottom += _sp->block_size(bottom)) {
  91     // As in the case of contiguous space above, we'd like to
  92     // just use the value returned by oop_iterate to increment the
  93     // current pointer; unfortunately, that won't work in CMS because
  94     // we'd need an interface change (it seems) to have the space
  95   // "adjust the object size" (for instance, pad it up to its
  96   // block alignment or minimum block size restrictions). XXX
  97     if (_sp->block_is_obj(bottom) &&
  98         !_sp->obj_allocated_since_save_marks(oop(bottom))) {
  99       oop(bottom)->oop_iterate(_cl, mr);
 100     }
 101   }
 102 }
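
The MemRegion form of oop_iterate() used above applies the closure only to
reference fields whose addresses fall inside mr, which is what keeps the scan
bounded to the dirty cards. A minimal sketch of a closure that could be passed
here, assuming only the standard two-overload ExtendedOopClosure protocol (the
closure itself is hypothetical):

    // Hypothetical closure: count the reference fields the walk visits.
    // oop_iterate(&cl, mr) invokes do_oop() only for fields located in mr.
    class CountFieldsClosure : public ExtendedOopClosure {
      int _count;
     public:
      CountFieldsClosure() : _count(0) {}
      virtual void do_oop(oop* p)       { _count++; }
      virtual void do_oop(narrowOop* p) { _count++; }
      int count() const { return _count; }
    };
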
 103 
 104 // We get called with "mr" representing the dirty region
 105 // that we want to process. Because of imprecise marking,
 106 // we may need to extend the incoming "mr" to the right,
 107 // and scan more. However, because we may already have
 108 // scanned some of that extended region, we may need to
  109   // trim its right end back some so we do not scan what
  110   // we (or another worker thread) may already have scanned
  111   // or may be planning to scan.
 112 void DirtyCardToOopClosure::do_MemRegion(MemRegion mr) {
 113 
 114   // Some collectors need to do special things whenever their dirty
 115   // cards are processed. For instance, CMS must remember mutator updates
 116   // (i.e. dirty cards) so as to re-scan mutated objects.
 117   // Such work can be piggy-backed here on dirty card scanning, so as to make
 118   // it slightly more efficient than doing a complete non-destructive pre-scan
 119   // of the card table.


 217   // collector in this space, which may have freed up objects after
 218   // they were dirtied and before the stop-the-world GC that is
 219   // examining cards here.
 220   assert(bottom < top, "ought to be at least one obj on a dirty card.");
 221 
 222   if (_boundary != NULL) {
 223     // We have a boundary outside of which we don't want to look
 224     // at objects, so create a filtering closure around the
 225     // oop closure before walking the region.
 226     FilteringClosure filter(_boundary, _cl);
 227     walk_mem_region_with_cl(mr, bottom, top, &filter);
 228   } else {
 229     // No boundary, simply walk the heap with the oop closure.
 230     walk_mem_region_with_cl(mr, bottom, top, _cl);
 231   }
 232 
 233 }
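
FilteringClosure is what keeps the boundary case cheap: it wraps the real
closure and forwards only references that point below _boundary. The real
class is declared with the other generation closures; a simplified sketch of
the idea, with the narrowOop decoding elided:

    // Simplified sketch of the FilteringClosure idea (not the real class):
    // forward do_oop() only when the referent lies below the boundary.
    class FilteringClosureSketch : public ExtendedOopClosure {
      HeapWord*           _boundary;
      ExtendedOopClosure* _cl;
     public:
      FilteringClosureSketch(HeapWord* boundary, ExtendedOopClosure* cl)
        : _boundary(boundary), _cl(cl) {}
      virtual void do_oop(oop* p) {
        oop obj = *p;
        if (obj != NULL && (HeapWord*)obj < _boundary) {
          _cl->do_oop(p);
        }
      }
      virtual void do_oop(narrowOop* p) { /* same, after decoding *p */ }
    };
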
 234 
 235 // We must replicate this so that the static type of "FilteringClosure"
 236 // (see above) is apparent at the oop_iterate calls.
 237 #define ContiguousSpaceDCTOC__walk_mem_region_with_cl_DEFN(ClosureType) \
 238 void ContiguousSpaceDCTOC::walk_mem_region_with_cl(MemRegion mr,        \
 239                                                    HeapWord* bottom,    \
 240                                                    HeapWord* top,       \
 241                                                    ClosureType* cl) {   \
 242   bottom += oop(bottom)->oop_iterate(cl, mr);                           \
 243   if (bottom < top) {                                                   \
 244     HeapWord* next_obj = bottom + oop(bottom)->size();                  \
 245     while (next_obj < top) {                                            \
 246       /* Bottom lies entirely below top, so we can call the */          \
 247       /* non-memRegion version of oop_iterate below. */                 \
 248       oop(bottom)->oop_iterate(cl);                                     \
 249       bottom = next_obj;                                                \
 250       next_obj = bottom + oop(bottom)->size();                          \
 251     }                                                                   \
 252     /* Last object. */                                                  \
 253     oop(bottom)->oop_iterate(cl, mr);                                   \
 254   }                                                                     \
 255 }
 256 
 257 // (There are only two of these, rather than N, because the split is due
 258 // only to the introduction of the FilteringClosure, a local part of the
 259 // impl of this abstraction.)
 260 ContiguousSpaceDCTOC__walk_mem_region_with_cl_DEFN(ExtendedOopClosure)
 261 ContiguousSpaceDCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)
 262 
 263 DirtyCardToOopClosure*
 264 ContiguousSpace::new_dcto_cl(ExtendedOopClosure* cl,
 265                              CardTableModRefBS::PrecisionStyle precision,
 266                              HeapWord* boundary) {
 267   return new ContiguousSpaceDCTOC(this, cl, precision, boundary);
 268 }
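
A hedged sketch of how a caller would drive this factory; the real call sites
live in the card-table remembered-set code, and the precision style and
boundary shown here are placeholders:

    // Hypothetical driver: build a DCTOC for a space and hand it one dirty
    // range taken from the card table.
    void scan_dirty_range(ContiguousSpace* sp, ExtendedOopClosure* cl,
                          HeapWord* boundary, MemRegion dirty) {
      DirtyCardToOopClosure* dcto =
          sp->new_dcto_cl(cl, CardTableModRefBS::ObjHeadPreciseArray, boundary);
      dcto->do_MemRegion(dirty);
    }
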
 269 
 270 void Space::initialize(MemRegion mr,
 271                        bool clear_space,
 272                        bool mangle_space) {
 273   HeapWord* bottom = mr.start();
 274   HeapWord* end    = mr.end();
 275   assert(Universe::on_page_boundary(bottom) && Universe::on_page_boundary(end),
 276          "invalid space boundaries");
 277   set_bottom(bottom);
 278   set_end(end);
 279   if (clear_space) clear(mangle_space);
 280 }
 281 


 537     p += oop(p)->size();
 538   }
 539   guarantee(p == top(), "end of last object must match end of space");
 540   if (top() != end()) {
 541     guarantee(top() == block_start_const(end()-1) &&
 542               top() == block_start_const(top()),
 543               "top should be start of unallocated block, if it exists");
 544   }
 545 }
 546 
 547 void Space::oop_iterate(ExtendedOopClosure* blk) {
 548   ObjectToOopClosure blk2(blk);
 549   object_iterate(&blk2);
 550 }
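
ObjectToOopClosure is the adapter that makes this delegation work: an
ObjectClosure that, for each object, runs the wrapped oop closure over that
object's reference fields. In rough outline (the real adapter is declared
with the other closure types):

    // Rough shape of the adapter used above.
    class ObjectToOopClosureSketch : public ObjectClosure {
      ExtendedOopClosure* _cl;
     public:
      ObjectToOopClosureSketch(ExtendedOopClosure* cl) : _cl(cl) {}
      virtual void do_object(oop obj) { obj->oop_iterate(_cl); }
    };
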
 551 
 552 bool Space::obj_is_alive(const HeapWord* p) const {
 553   assert (block_is_obj(p), "The address should point to an object");
 554   return true;
 555 }
 556 
 557 #if INCLUDE_ALL_GCS
 558 #define ContigSpace_PAR_OOP_ITERATE_DEFN(OopClosureType, nv_suffix)         \
 559                                                                             \
 560   void ContiguousSpace::par_oop_iterate(MemRegion mr, OopClosureType* blk) {\
 561     HeapWord* obj_addr = mr.start();                                        \
 562     HeapWord* t = mr.end();                                                 \
 563     while (obj_addr < t) {                                                  \
 564       assert(oop(obj_addr)->is_oop(), "Should be an oop");                  \
 565       obj_addr += oop(obj_addr)->oop_iterate(blk);                          \
 566     }                                                                       \
 567   }
 568 
 569   ALL_PAR_OOP_ITERATE_CLOSURES(ContigSpace_PAR_OOP_ITERATE_DEFN)
 570 
 571 #undef ContigSpace_PAR_OOP_ITERATE_DEFN
 572 #endif // INCLUDE_ALL_GCS
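
The macro is stamped out once per closure type so that blk has a concrete
static type at the oop_iterate call, letting overload resolution pick a
specialized, non-virtual iteration path (which is what the nv_suffix refers
to). For instance, the expansion for a hypothetical closure type MyClosure
with suffix _nv would read:

    // Mechanical expansion of ContigSpace_PAR_OOP_ITERATE_DEFN(MyClosure, _nv);
    // MyClosure is a stand-in, the real list comes from
    // ALL_PAR_OOP_ITERATE_CLOSURES.
    void ContiguousSpace::par_oop_iterate(MemRegion mr, MyClosure* blk) {
      HeapWord* obj_addr = mr.start();
      HeapWord* t = mr.end();
      while (obj_addr < t) {
        assert(oop(obj_addr)->is_oop(), "Should be an oop");
        obj_addr += oop(obj_addr)->oop_iterate(blk);  // resolves against MyClosure*
      }
    }
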
 573 
 574 void ContiguousSpace::oop_iterate(ExtendedOopClosure* blk) {
 575   if (is_empty()) return;
 576   HeapWord* obj_addr = bottom();
 577   HeapWord* t = top();
  578   // Could call object_iterate(), but this is easier.
 579   while (obj_addr < t) {
 580     obj_addr += oop(obj_addr)->oop_iterate(blk);
 581   }
 582 }
 583 
 584 void ContiguousSpace::object_iterate(ObjectClosure* blk) {
 585   if (is_empty()) return;
 586   WaterMark bm = bottom_mark();
 587   object_iterate_from(bm, blk);
 588 }
 589 
 590 // For a ContiguousSpace object_iterate() and safe_object_iterate()
 591 // are the same.
 592 void ContiguousSpace::safe_object_iterate(ObjectClosure* blk) {
 593   object_iterate(blk);
 594 }
 595 
 596 void ContiguousSpace::object_iterate_from(WaterMark mark, ObjectClosure* blk) {
 597   assert(mark.space() == this, "Mark does not match space");
 598   HeapWord* p = mark.point();
 599   while (p < top()) {
 600     blk->do_object(oop(p));
 601     p += oop(p)->size();
 602   }
 603 }
 604 
 605 HeapWord*
 606 ContiguousSpace::object_iterate_careful(ObjectClosureCareful* blk) {
 607   HeapWord * limit = concurrent_iteration_safe_limit();
 608   assert(limit <= top(), "sanity check");
 609   for (HeapWord* p = bottom(); p < limit;) {
 610     size_t size = blk->do_object_careful(oop(p));
 611     if (size == 0) {
 612       return p;  // failed at p
 613     } else {
 614       p += size;
 615     }
 616   }
 617   return NULL; // all done
 618 }
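
The careful-iteration contract: the closure returns the object's size to
continue, or 0 to abort, in which case the failing address is handed back to
the caller (NULL means the whole range was processed). A sketch of a
conforming closure, with the predicate and per-object work left hypothetical:

    // Hypothetical ObjectClosureCareful: stop at the first object that fails
    // some safety predicate.  (Other pure virtuals of the closure hierarchy
    // are omitted from this sketch.)
    class StopOnUnsafeClosure : public ObjectClosureCareful {
     public:
      virtual size_t do_object_careful(oop p) {
        if (!is_safe_to_process(p)) {  // hypothetical predicate
          return 0;                    // 0 => caller returns this address
        }
        process(p);                    // hypothetical per-object work
        return p->size();              // advance the walk
      }
    };
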
 619 
 620 #define ContigSpace_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix)  \
 621                                                                           \
 622 void ContiguousSpace::                                                    \
 623 oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) {            \
 624   HeapWord* t;                                                            \
 625   HeapWord* p = saved_mark_word();                                        \
 626   assert(p != NULL, "expected saved mark");                               \
 627                                                                           \
 628   const intx interval = PrefetchScanIntervalInBytes;                      \
 629   do {                                                                    \
 630     t = top();                                                            \
 631     while (p < t) {                                                       \
 632       Prefetch::write(p, interval);                                       \
 633       debug_only(HeapWord* prev = p);                                     \
 634       oop m = oop(p);                                                     \
 635       p += m->oop_iterate(blk);                                           \
 636     }                                                                     \
 637   } while (t < top());                                                    \
 638                                                                           \
 639   set_saved_mark_word(p);                                                 \
 640 }
 641 
 642 ALL_SINCE_SAVE_MARKS_CLOSURES(ContigSpace_OOP_SINCE_SAVE_MARKS_DEFN)
 643 
 644 #undef ContigSpace_OOP_SINCE_SAVE_MARKS_DEFN
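
The do/while above re-reads top() because applying the closure may allocate
into this space (for instance when scanning promotes objects here), moving
top() mid-iteration; the loop repeats until it catches up. The surrounding
protocol, as a hypothetical helper (the exact oop_since_save_marks_iterate
variant depends on the closure's static type, so the call is left indicated
rather than spelled out):

    void scan_newly_allocated(ContiguousSpace* sp, ExtendedOopClosure* cl) {
      sp->set_saved_mark();  // remember the current top()
      // ... collector work that may allocate new objects into sp ...
      // sp->oop_since_save_marks_iterate_v(cl);  // one generated variant:
      //                                          // visits only objects
      //                                          // allocated since the mark
    }
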
 645 
 646 // Very general, slow implementation.
 647 HeapWord* ContiguousSpace::block_start_const(const void* p) const {
 648   assert(MemRegion(bottom(), end()).contains(p),
 649          err_msg("p (" PTR_FORMAT ") not in space [" PTR_FORMAT ", " PTR_FORMAT ")",
 650                   p, bottom(), end()));
 651   if (p >= top()) {
 652     return top();
 653   } else {
 654     HeapWord* last = bottom();
 655     HeapWord* cur = last;
 656     while (cur <= p) {
 657       last = cur;
 658       cur += oop(cur)->size();
 659     }
 660     assert(oop(last)->is_oop(),
 661            err_msg(PTR_FORMAT " should be an object start", last));
 662     return last;
 663   }
 664 }
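
Note this lookup is linear in the number of objects below p, which is why the
comment calls it slow; subclasses backed by a block-offset table override it.
Its typical consumer is the dirty-card walk, which backs up from a card
boundary to the object covering it, roughly:

    // Hypothetical helper showing the usual use of block_start during a
    // dirty-card scan.
    HeapWord* covering_object_start(Space* sp, HeapWord* card_start) {
      HeapWord* obj_start = sp->block_start(card_start);
      assert(obj_start <= card_start, "block_start never moves forward");
      return obj_start;
    }
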

+++ new/src/share/vm/memory/space.cpp

  79 void DirtyCardToOopClosure::walk_mem_region(MemRegion mr,
  80                                             HeapWord* bottom,
  81                                             HeapWord* top) {
  82   // 1. Blocks may or may not be objects.
  83   // 2. Even when a block_is_obj(), it may not entirely
  84   //    occupy the block if the block quantum is larger than
  85   //    the object size.
  86   // We can and should try to optimize by calling the non-MemRegion
  87   // version of oop_iterate() for all but the extremal objects
  88   // (for which we need to call the MemRegion version of
  89   // oop_iterate()). To be done post-beta. XXX
  90   for (; bottom < top; bottom += _sp->block_size(bottom)) {
  91     // As in the case of contiguous space above, we'd like to
  92     // just use the value returned by oop_iterate to increment the
  93     // current pointer; unfortunately, that won't work in CMS because
  94     // we'd need an interface change (it seems) to have the space
  95   // "adjust the object size" (for instance, pad it up to its
  96   // block alignment or minimum block size restrictions). XXX
  97     if (_sp->block_is_obj(bottom) &&
  98         !_sp->obj_allocated_since_save_marks(oop(bottom))) {
  99       oop(bottom)->oop_iterate<false>(_cl, mr);
 100     }
 101   }
 102 }
 103 
 104 // We get called with "mr" representing the dirty region
 105 // that we want to process. Because of imprecise marking,
 106 // we may need to extend the incoming "mr" to the right,
 107 // and scan more. However, because we may already have
 108 // scanned some of that extended region, we may need to
  109   // trim its right end back some so we do not scan what
  110   // we (or another worker thread) may already have scanned
  111   // or may be planning to scan.
 112 void DirtyCardToOopClosure::do_MemRegion(MemRegion mr) {
 113 
 114   // Some collectors need to do special things whenever their dirty
 115   // cards are processed. For instance, CMS must remember mutator updates
 116   // (i.e. dirty cards) so as to re-scan mutated objects.
 117   // Such work can be piggy-backed here on dirty card scanning, so as to make
 118   // it slightly more efficient than doing a complete non-destructive pre-scan
 119   // of the card table.


 217   // collector in this space, which may have freed up objects after
 218   // they were dirtied and before the stop-the-world GC that is
 219   // examining cards here.
 220   assert(bottom < top, "ought to be at least one obj on a dirty card.");
 221 
 222   if (_boundary != NULL) {
 223     // We have a boundary outside of which we don't want to look
 224     // at objects, so create a filtering closure around the
 225     // oop closure before walking the region.
 226     FilteringClosure filter(_boundary, _cl);
 227     walk_mem_region_with_cl(mr, bottom, top, &filter);
 228   } else {
 229     // No boundary, simply walk the heap with the oop closure.
 230     walk_mem_region_with_cl(mr, bottom, top, _cl);
 231   }
 232 
 233 }
 234 
 235 // We must replicate this so that the static type of "FilteringClosure"
 236 // (see above) is apparent at the oop_iterate calls.
 237 #define ContiguousSpaceDCTOC__walk_mem_region_with_cl_DEFN(ClosureType, nv) \
 238 void ContiguousSpaceDCTOC::walk_mem_region_with_cl(MemRegion mr,        \
 239                                                    HeapWord* bottom,    \
 240                                                    HeapWord* top,       \
 241                                                    ClosureType* cl) {   \
  242   bottom += oop(bottom)->oop_iterate<nv>(cl, mr);                       \
 243   if (bottom < top) {                                                   \
 244     HeapWord* next_obj = bottom + oop(bottom)->size();                  \
 245     while (next_obj < top) {                                            \
 246       /* Bottom lies entirely below top, so we can call the */          \
 247       /* non-memRegion version of oop_iterate below. */                 \
  248       oop(bottom)->oop_iterate<nv>(cl);                                 \
 249       bottom = next_obj;                                                \
 250       next_obj = bottom + oop(bottom)->size();                          \
 251     }                                                                   \
 252     /* Last object. */                                                  \
  253     oop(bottom)->oop_iterate<nv>(cl, mr);                               \
 254   }                                                                     \
 255 }
 256 
 257 // (There are only two of these, rather than N, because the split is due
 258 // only to the introduction of the FilteringClosure, a local part of the
 259 // impl of this abstraction.)
 260 ContiguousSpaceDCTOC__walk_mem_region_with_cl_DEFN(ExtendedOopClosure, false)
 261 ContiguousSpaceDCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure, true)
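
This is the crux of the templateOopIterate change: instead of relying on
per-type overloads generated by the nv_suffix macros, the closure's static
type travels into oop_iterate as a template parameter together with a bool nv,
and nv = true (as for FilteringClosure above) requests compile-time-bound
do_oop calls. A self-contained toy of the underlying qualified-call trick,
with all names hypothetical:

    // A qualified member call through the static type bypasses the vtable
    // and can be inlined; that is the devirtualization the template enables.
    struct BaseClosure {
      virtual void do_oop(void** p) { /* generic action */ }
    };
    struct FilterClosure : BaseClosure {
      virtual void do_oop(void** p) { /* filtered action */ }
    };

    template <bool nv, typename ClosureType>
    void apply_closure(ClosureType* cl, void** p) {
      if (nv) {
        cl->ClosureType::do_oop(p);  // statically bound, inlinable
      } else {
        cl->do_oop(p);               // ordinary virtual dispatch
      }
    }
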
 262 
 263 DirtyCardToOopClosure*
 264 ContiguousSpace::new_dcto_cl(ExtendedOopClosure* cl,
 265                              CardTableModRefBS::PrecisionStyle precision,
 266                              HeapWord* boundary) {
 267   return new ContiguousSpaceDCTOC(this, cl, precision, boundary);
 268 }
 269 
 270 void Space::initialize(MemRegion mr,
 271                        bool clear_space,
 272                        bool mangle_space) {
 273   HeapWord* bottom = mr.start();
 274   HeapWord* end    = mr.end();
 275   assert(Universe::on_page_boundary(bottom) && Universe::on_page_boundary(end),
 276          "invalid space boundaries");
 277   set_bottom(bottom);
 278   set_end(end);
 279   if (clear_space) clear(mangle_space);
 280 }
 281 


 537     p += oop(p)->size();
 538   }
 539   guarantee(p == top(), "end of last object must match end of space");
 540   if (top() != end()) {
 541     guarantee(top() == block_start_const(end()-1) &&
 542               top() == block_start_const(top()),
 543               "top should be start of unallocated block, if it exists");
 544   }
 545 }
 546 
 547 void Space::oop_iterate(ExtendedOopClosure* blk) {
 548   ObjectToOopClosure blk2(blk);
 549   object_iterate(&blk2);
 550 }
 551 
 552 bool Space::obj_is_alive(const HeapWord* p) const {
 553   assert (block_is_obj(p), "The address should point to an object");
 554   return true;
 555 }
 556 
 557 void ContiguousSpace::oop_iterate(ExtendedOopClosure* blk) {
 558   if (is_empty()) return;
 559   HeapWord* obj_addr = bottom();
 560   HeapWord* t = top();
  561   // Could call object_iterate(), but this is easier.
 562   while (obj_addr < t) {
 563     obj_addr += oop(obj_addr)->oop_iterate<false>(blk);
 564   }
 565 }
 566 
 567 void ContiguousSpace::object_iterate(ObjectClosure* blk) {
 568   if (is_empty()) return;
 569   WaterMark bm = bottom_mark();
 570   object_iterate_from(bm, blk);
 571 }
 572 
 573 // For a ContiguousSpace object_iterate() and safe_object_iterate()
 574 // are the same.
 575 void ContiguousSpace::safe_object_iterate(ObjectClosure* blk) {
 576   object_iterate(blk);
 577 }
 578 
 579 void ContiguousSpace::object_iterate_from(WaterMark mark, ObjectClosure* blk) {
 580   assert(mark.space() == this, "Mark does not match space");
 581   HeapWord* p = mark.point();
 582   while (p < top()) {
 583     blk->do_object(oop(p));
 584     p += oop(p)->size();
 585   }
 586 }
 587 
 588 HeapWord*
 589 ContiguousSpace::object_iterate_careful(ObjectClosureCareful* blk) {
 590   HeapWord * limit = concurrent_iteration_safe_limit();
 591   assert(limit <= top(), "sanity check");
 592   for (HeapWord* p = bottom(); p < limit;) {
 593     size_t size = blk->do_object_careful(oop(p));
 594     if (size == 0) {
 595       return p;  // failed at p
 596     } else {
 597       p += size;
 598     }
 599   }
 600   return NULL; // all done
 601 }
 602 
 603 template <bool nv, typename OopClosureType>
 604 void ContiguousSpace::
 605 cspace_oop_since_save_marks_iterate(OopClosureType* blk) {
 606   HeapWord* t;
 607   HeapWord* p = saved_mark_word();
 608   assert(p != NULL, "expected saved mark");
 609 
 610   const intx interval = PrefetchScanIntervalInBytes;
 611   do {
 612     t = top();
 613     while (p < t) {
 614       Prefetch::write(p, interval);
 615       debug_only(HeapWord* prev = p);
 616       oop m = oop(p);
 617       p += m->oop_iterate<nv>(blk);
 618     }
 619   } while (t < top());
 620 
 621   set_saved_mark_word(p);
 622 }
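
With the macro gone, the per-closure entry points can presumably collapse into
one-line forwards to this template; a hypothetical example of what such a
wrapper could look like (the actual wrappers, if any, live elsewhere in this
patch set):

    // Hypothetical forwarding wrapper, modeled on the macro this replaces;
    // a specialized closure type would pass nv = true instead.
    void ContiguousSpace::oop_since_save_marks_iterate_v(ExtendedOopClosure* blk) {
      cspace_oop_since_save_marks_iterate<false>(blk);
    }
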
 623 
 624 // Very general, slow implementation.
 625 HeapWord* ContiguousSpace::block_start_const(const void* p) const {
 626   assert(MemRegion(bottom(), end()).contains(p),
 627          err_msg("p (" PTR_FORMAT ") not in space [" PTR_FORMAT ", " PTR_FORMAT ")",
 628                   p, bottom(), end()));
 629   if (p >= top()) {
 630     return top();
 631   } else {
 632     HeapWord* last = bottom();
 633     HeapWord* cur = last;
 634     while (cur <= p) {
 635       last = cur;
 636       cur += oop(cur)->size();
 637     }
 638     assert(oop(last)->is_oop(),
 639            err_msg(PTR_FORMAT " should be an object start", last));
 640     return last;
 641   }
 642 }