
src/share/vm/gc/g1/heapRegion.cpp

rev 12513 : 8071278: Fix the closure mess in G1RemSet::refine_card()
Summary: Remove the use of many nested closures in the code that refines a card.
Reviewed-by: kbarrett, sjohanss
   1 /*
   2  * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *


  38 #include "logging/log.hpp"
  39 #include "memory/iterator.hpp"
  40 #include "memory/resourceArea.hpp"
  41 #include "oops/oop.inline.hpp"
  42 #include "runtime/atomic.hpp"
  43 #include "runtime/orderAccess.inline.hpp"
  44 
  45 int    HeapRegion::LogOfHRGrainBytes = 0;
  46 int    HeapRegion::LogOfHRGrainWords = 0;
  47 size_t HeapRegion::GrainBytes        = 0;
  48 size_t HeapRegion::GrainWords        = 0;
  49 size_t HeapRegion::CardsPerRegion    = 0;
  50 
  51 HeapRegionDCTOC::HeapRegionDCTOC(G1CollectedHeap* g1,
  52                                  HeapRegion* hr,
  53                                  G1ParPushHeapRSClosure* cl,
  54                                  CardTableModRefBS::PrecisionStyle precision) :
  55   DirtyCardToOopClosure(hr, cl, precision, NULL),
  56   _hr(hr), _rs_scan(cl), _g1(g1) { }
  57 
  58 FilterOutOfRegionClosure::FilterOutOfRegionClosure(HeapRegion* r,
  59                                                    OopClosure* oc) :
  60   _r_bottom(r->bottom()), _r_end(r->end()), _oc(oc) { }
  61 
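The constructor above only caches the region bounds; the filtering itself happens in the closure's do_oop, which lives in heapRegion.inline.hpp and is not shown on this page. A minimal sketch of that check, assuming the usual decode-and-compare shape (illustrative, not the actual implementation):

  // Forward only references whose target lies outside [_r_bottom, _r_end);
  // those are the ones that matter for remembered-set maintenance.
  template <class T>
  inline void FilterOutOfRegionClosure::do_oop_work(T* p) {
    T heap_oop = oopDesc::load_heap_oop(p);
    if (!oopDesc::is_null(heap_oop)) {
      HeapWord* obj = (HeapWord*)oopDesc::decode_heap_oop_not_null(heap_oop);
      if (obj < _r_bottom || obj >= _r_end) {
        _oc->do_oop(p);
      }
    }
  }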
  62 void HeapRegionDCTOC::walk_mem_region(MemRegion mr,
  63                                       HeapWord* bottom,
  64                                       HeapWord* top) {
  65   G1CollectedHeap* g1h = _g1;
  66   size_t oop_size;
  67   HeapWord* cur = bottom;
  68 
  69   // Start filtering what we add to the remembered set. If the object is
  70   // not considered dead, either because it is marked (in the mark bitmap)
  71   // or it was allocated after marking finished, then we add it. Otherwise
  72   // we can safely ignore the object.
  73   if (!g1h->is_obj_dead(oop(cur))) {
  74     oop_size = oop(cur)->oop_iterate_size(_rs_scan, mr);
  75   } else {
  76     oop_size = _hr->block_size(cur);
  77   }
  78 
  79   cur += oop_size;
  80 
  81   if (cur < top) {


 336   } else if (during_conc_mark) {
 337     // During concurrent mark, all objects in the CSet (including
 338     // the ones we find to be self-forwarded) are implicitly live.
 339     // So all objects need to be above NTAMS.
 340     _next_top_at_mark_start = bottom();
 341     _next_marked_bytes = 0;
 342   }
 343 }
 344 
 345 void HeapRegion::note_self_forwarding_removal_end(size_t marked_bytes) {
 346   assert(marked_bytes <= used(),
 347          "marked: " SIZE_FORMAT " used: " SIZE_FORMAT, marked_bytes, used());
 348   _prev_top_at_mark_start = top();
 349   _prev_marked_bytes = marked_bytes;
 350 }
 351 
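note_self_forwarding_removal_end() moves prev-TAMS up to top(), and that boundary is exactly what the is_obj_dead() filtering earlier in this file consults. A sketch of the liveness test, with illustrative names for the prev-bitmap accessor (the real check is G1CollectedHeap::is_obj_dead()):

  // Dead means: below prev-TAMS and unmarked in the previous marking
  // bitmap. Anything allocated at or above prev-TAMS is implicitly live.
  bool obj_is_dead(HeapWord* addr, HeapRegion* hr, G1CMBitMap* prev_bitmap) {
    return addr < hr->prev_top_at_mark_start() && !prev_bitmap->isMarked(addr);
  }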
 352 // Humongous objects are allocated directly in the old-gen.  Need
 353 // special handling for concurrent processing encountering an
 354 // in-progress allocation.
 355 static bool do_oops_on_card_in_humongous(MemRegion mr,
 356                                          FilterOutOfRegionClosure* cl,
 357                                          HeapRegion* hr,
 358                                          G1CollectedHeap* g1h) {
 359   assert(hr->is_humongous(), "precondition");
 360   HeapRegion* sr = hr->humongous_start_region();
 361   oop obj = oop(sr->bottom());
 362 
 363   // If concurrent and klass_or_null is NULL, then space has been
 364   // allocated but the object has not yet been published by setting
 365   // the klass.  That can only happen if the card is stale.  However,
 366   // we've already set the card clean, so we must return failure,
 367   // since the allocating thread could have performed a write to the
 368   // card that might be missed otherwise.
 369   if (!g1h->is_gc_active() && (obj->klass_or_null_acquire() == NULL)) {
 370     return false;
 371   }
 372 
 373   // We have a well-formed humongous object at the start of sr.
 374   // Only filler objects follow a humongous object in the containing
 375   // regions, and we can ignore those.  So only process the one
 376   // humongous object.
 377   if (!g1h->is_obj_dead(obj, sr)) {
 378     if (obj->is_objArray() || (sr->bottom() < mr.start())) {
 379       // objArrays are always marked precisely, so limit processing
 380       // with mr.  Non-objArrays might be precisely marked, and since
 381       // it's humongous it's worthwhile avoiding full processing.
 382       // However, the card could be stale and only cover filler
 383       // objects.  That should be rare, so not worth checking for;
 384       // instead let it fall out from the bounded iteration.
 385       obj->oop_iterate(cl, mr);
 386     } else {
 387       // If obj is not an objArray and mr contains the start of the
 388       // obj, then this could be an imprecise mark, and we need to
 389       // process the entire object.
 390       obj->oop_iterate(cl);
 391     }
 392   }
 393   return true;
 394 }
 395 
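The klass_or_null_acquire() check above pairs with a release-store of the klass by the allocating thread. A sketch of that publication protocol in plain C++11 atomics (illustrative only; HotSpot itself uses OrderAccess and the klass field of oopDesc):

  #include <atomic>

  struct PublishedObject {
    std::atomic<void*> klass{nullptr};   // null until the object is published
    long payload;
  };

  void allocating_thread(PublishedObject* obj, void* k) {
    obj->payload = 42;                                // initialize fields first
    obj->klass.store(k, std::memory_order_release);   // then publish the klass
  }

  bool concurrent_refiner(PublishedObject* obj) {
    // Mirrors klass_or_null_acquire(): a null klass means the object is not
    // yet published, so the (already-cleaned) card cannot be processed and
    // failure must be returned, as in do_oops_on_card_in_humongous().
    return obj->klass.load(std::memory_order_acquire) != nullptr;
  }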
 396 bool HeapRegion::oops_on_card_seq_iterate_careful(MemRegion mr,
 397                                                   FilterOutOfRegionClosure* cl) {
 398   assert(MemRegion(bottom(), end()).contains(mr), "Card region not in heap region");
 399   G1CollectedHeap* g1h = G1CollectedHeap::heap();
 400 
 401   // Special handling for humongous regions.
 402   if (is_humongous()) {
 403     return do_oops_on_card_in_humongous(mr, cl, this, g1h);
 404   }
 405   assert(is_old(), "precondition");
 406 
 407   // Because mr has been trimmed to what's been allocated in this
 408   // region, the parts of the heap that are examined here are always
 409   // parsable; there's no need to use klass_or_null to detect
 410   // in-progress allocation.
 411 
 412   // Cache the boundaries of the memory region in some const locals
 413   HeapWord* const start = mr.start();
 414   HeapWord* const end = mr.end();
 415 
 416   // Find the obj that extends onto mr.start().
 417   // Update BOT as needed while finding start of (possibly dead)


 746           if (!_failures) {
 747             log.error("----------");
 748           }
 749           log.error("Missing rem set entry:");
 750           log.error("Field " PTR_FORMAT " of obj " PTR_FORMAT ", in region " HR_FORMAT,
 751             p2i(p), p2i(_containing_obj), HR_FORMAT_PARAMS(from));
 752           ResourceMark rm;
 753           _containing_obj->print_on(log.error_stream());
 754           log.error("points to obj " PTR_FORMAT " in region " HR_FORMAT, p2i(obj), HR_FORMAT_PARAMS(to));
 755           if (obj->is_oop()) {
 756             obj->print_on(log.error_stream());
 757           }
 758           log.error("Obj head CTE = %d, field CTE = %d.", cv_obj, cv_field);
 759           log.error("----------");
 760           _failures = true;
 761           _n_failures++;
 762         }
 763       }
 764     }
 765   }

 766 };
 767 
 768 // This really ought to be commoned up into OffsetTableContigSpace somehow.
 769 // We would need a mechanism to make that code skip dead objects.
 770 
 771 void HeapRegion::verify(VerifyOption vo,
 772                         bool* failures) const {
 773   G1CollectedHeap* g1 = G1CollectedHeap::heap();
 774   *failures = false;
 775   HeapWord* p = bottom();
 776   HeapWord* prev_p = NULL;
 777   VerifyLiveClosure vl_cl(g1, vo);
 778   VerifyRemSetClosure vr_cl(g1, vo);
 779   bool is_region_humongous = is_humongous();
 780   size_t object_num = 0;
 781   while (p < top()) {
 782     oop obj = oop(p);
 783     size_t obj_size = block_size(p);
 784     object_num += 1;
 785 
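The verification loop above is the standard walk over a parsable region: block_size() advances past live objects, dead objects and filler blocks alike. Stripped of the verification closures, the idiom reduces to:

  // Walk every block in [bottom, top); block_size(p) is valid even when
  // the block at p is a dead object or a filler.
  for (HeapWord* p = bottom(); p < top(); p += block_size(p)) {
    oop obj = oop(p);
    // ... examine obj ...
  }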


   1 /*
   2  * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *


  38 #include "logging/log.hpp"
  39 #include "memory/iterator.hpp"
  40 #include "memory/resourceArea.hpp"
  41 #include "oops/oop.inline.hpp"
  42 #include "runtime/atomic.hpp"
  43 #include "runtime/orderAccess.inline.hpp"
  44 
  45 int    HeapRegion::LogOfHRGrainBytes = 0;
  46 int    HeapRegion::LogOfHRGrainWords = 0;
  47 size_t HeapRegion::GrainBytes        = 0;
  48 size_t HeapRegion::GrainWords        = 0;
  49 size_t HeapRegion::CardsPerRegion    = 0;
  50 
  51 HeapRegionDCTOC::HeapRegionDCTOC(G1CollectedHeap* g1,
  52                                  HeapRegion* hr,
  53                                  G1ParPushHeapRSClosure* cl,
  54                                  CardTableModRefBS::PrecisionStyle precision) :
  55   DirtyCardToOopClosure(hr, cl, precision, NULL),
  56   _hr(hr), _rs_scan(cl), _g1(g1) { }
  57 
  58 void HeapRegionDCTOC::walk_mem_region(MemRegion mr,
  59                                       HeapWord* bottom,
  60                                       HeapWord* top) {
  61   G1CollectedHeap* g1h = _g1;
  62   size_t oop_size;
  63   HeapWord* cur = bottom;
  64 
  65   // Start filtering what we add to the remembered set. If the object is
  66   // not considered dead, either because it is marked (in the mark bitmap)
  67   // or it was allocated after marking finished, then we add it. Otherwise
  68   // we can safely ignore the object.
  69   if (!g1h->is_obj_dead(oop(cur))) {
  70     oop_size = oop(cur)->oop_iterate_size(_rs_scan, mr);
  71   } else {
  72     oop_size = _hr->block_size(cur);
  73   }
  74 
  75   cur += oop_size;
  76 
  77   if (cur < top) {


 332   } else if (during_conc_mark) {
 333     // During concurrent mark, all objects in the CSet (including
 334     // the ones we find to be self-forwarded) are implicitly live.
 335     // So all objects need to be above NTAMS.
 336     _next_top_at_mark_start = bottom();
 337     _next_marked_bytes = 0;
 338   }
 339 }
 340 
 341 void HeapRegion::note_self_forwarding_removal_end(size_t marked_bytes) {
 342   assert(marked_bytes <= used(),
 343          "marked: " SIZE_FORMAT " used: " SIZE_FORMAT, marked_bytes, used());
 344   _prev_top_at_mark_start = top();
 345   _prev_marked_bytes = marked_bytes;
 346 }
 347 
 348 // Humongous objects are allocated directly in the old-gen.  Need
 349 // special handling for concurrent processing encountering an
 350 // in-progress allocation.
 351 static bool do_oops_on_card_in_humongous(MemRegion mr,
 352                                          G1UpdateRSOrPushRefOopClosure* cl,
 353                                          HeapRegion* hr,
 354                                          G1CollectedHeap* g1h) {
 355   assert(hr->is_humongous(), "precondition");
 356   HeapRegion* sr = hr->humongous_start_region();
 357   oop obj = oop(sr->bottom());
 358 
 359   // If concurrent and klass_or_null is NULL, then space has been
 360   // allocated but the object has not yet been published by setting
 361   // the klass.  That can only happen if the card is stale.  However,
 362   // we've already set the card clean, so we must return failure,
 363   // since the allocating thread could have performed a write to the
 364   // card that might be missed otherwise.
 365   if (!g1h->is_gc_active() && (obj->klass_or_null_acquire() == NULL)) {
 366     return false;
 367   }
 368 
 369   // We have a well-formed humongous object at the start of sr.
 370   // Only filler objects follow a humongous object in the containing
 371   // regions, and we can ignore those.  So only process the one
 372   // humongous object.
 373   if (!g1h->is_obj_dead(obj, sr)) {
 374     if (obj->is_objArray() || (sr->bottom() < mr.start())) {
 375       // objArrays are always marked precisely, so limit processing
 376       // with mr.  Non-objArrays might be precisely marked, and since
 377       // it's humongous it's worthwhile avoiding full processing.
 378       // However, the card could be stale and only cover filler
 379       // objects.  That should be rare, so not worth checking for;
 380       // instead let it fall out from the bounded iteration.
 381       obj->oop_iterate(cl, mr);
 382     } else {
 383       // If obj is not an objArray and mr contains the start of the
 384       // obj, then this could be an imprecise mark, and we need to
 385       // process the entire object.
 386       obj->oop_iterate(cl);
 387     }
 388   }
 389   return true;
 390 }
 391 
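With the new signature, the card-refinement path passes its G1UpdateRSOrPushRefOopClosure straight in, with no wrapping closures in between. A hedged sketch of the resulting call shape in G1RemSet::refine_card() (that code is not part of this page; argument lists are elided):

  // Hypothetical caller shape; dirty_region is the MemRegion covered by
  // the card being refined.
  G1UpdateRSOrPushRefOopClosure update_rs_oop_cl(/* ... */);
  bool card_processed = r->oops_on_card_seq_iterate_careful(dirty_region, &update_rs_oop_cl);
  if (!card_processed) {
    // A stale card raced with an in-progress humongous allocation;
    // the card must not be treated as done.
  }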
 392 bool HeapRegion::oops_on_card_seq_iterate_careful(MemRegion mr,
 393                                                   G1UpdateRSOrPushRefOopClosure* cl) {
 394   assert(MemRegion(bottom(), end()).contains(mr), "Card region not in heap region");
 395   G1CollectedHeap* g1h = G1CollectedHeap::heap();
 396 
 397   // Special handling for humongous regions.
 398   if (is_humongous()) {
 399     return do_oops_on_card_in_humongous(mr, cl, this, g1h);
 400   }
 401   assert(is_old(), "precondition");
 402 
 403   // Because mr has been trimmed to what's been allocated in this
 404   // region, the parts of the heap that are examined here are always
 405   // parsable; there's no need to use klass_or_null to detect
 406   // in-progress allocation.
 407 
 408   // Cache the boundaries of the memory region in some const locals
 409   HeapWord* const start = mr.start();
 410   HeapWord* const end = mr.end();
 411 
 412   // Find the obj that extends onto mr.start().
 413   // Update BOT as needed while finding start of (possibly dead)


 742           if (!_failures) {
 743             log.error("----------");
 744           }
 745           log.error("Missing rem set entry:");
 746           log.error("Field " PTR_FORMAT " of obj " PTR_FORMAT ", in region " HR_FORMAT,
 747             p2i(p), p2i(_containing_obj), HR_FORMAT_PARAMS(from));
 748           ResourceMark rm;
 749           _containing_obj->print_on(log.error_stream());
 750           log.error("points to obj " PTR_FORMAT " in region " HR_FORMAT, p2i(obj), HR_FORMAT_PARAMS(to));
 751           if (obj->is_oop()) {
 752             obj->print_on(log.error_stream());
 753           }
 754           log.error("Obj head CTE = %d, field CTE = %d.", cv_obj, cv_field);
 755           log.error("----------");
 756           _failures = true;
 757           _n_failures++;
 758         }
 759       }
 760     }
 761   }
 762 };
 763 
 764 // Closure that applies the given two closures in sequence.
 765 class G1Mux2Closure : public OopClosure {
 766   OopClosure* _c1;
 767   OopClosure* _c2;
 768 public:
 769   G1Mux2Closure(OopClosure *c1, OopClosure *c2) { _c1 = c1; _c2 = c2; }
 770   template <class T> inline void do_oop_work(T* p) {
 771     // Apply first closure; then apply the second.
 772     _c1->do_oop(p);
 773     _c2->do_oop(p);
 774   }
 775   virtual inline void do_oop(oop* p) { do_oop_work(p); }
 776   virtual inline void do_oop(narrowOop* p) { do_oop_work(p); }
 777 };
 778 
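G1Mux2Closure is shown in full above; its point is to apply two independent oop closures in a single pass over an object's fields rather than iterating twice. A minimal usage sketch, using the two verification closures constructed in verify() below:

  VerifyLiveClosure vl_cl(g1, vo);
  VerifyRemSetClosure vr_cl(g1, vo);
  G1Mux2Closure mux(&vl_cl, &vr_cl);
  obj->oop_iterate(&mux);   // each field p gets vl_cl.do_oop(p), then vr_cl.do_oop(p)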
 779 // This really ought to be commoned up into OffsetTableContigSpace somehow.
 780 // We would need a mechanism to make that code skip dead objects.
 781 
 782 void HeapRegion::verify(VerifyOption vo,
 783                         bool* failures) const {
 784   G1CollectedHeap* g1 = G1CollectedHeap::heap();
 785   *failures = false;
 786   HeapWord* p = bottom();
 787   HeapWord* prev_p = NULL;
 788   VerifyLiveClosure vl_cl(g1, vo);
 789   VerifyRemSetClosure vr_cl(g1, vo);
 790   bool is_region_humongous = is_humongous();
 791   size_t object_num = 0;
 792   while (p < top()) {
 793     oop obj = oop(p);
 794     size_t obj_size = block_size(p);
 795     object_num += 1;
 796 

