
src/share/vm/gc_implementation/g1/heapRegion.cpp

rev 8910 : full patch for jfr

Old version (before the patch):
   1 /*
   2  * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "code/nmethod.hpp"
  27 #include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp"
  28 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
  29 #include "gc_implementation/g1/g1OopClosures.inline.hpp"
  30 #include "gc_implementation/g1/heapRegion.inline.hpp"
  31 #include "gc_implementation/g1/heapRegionBounds.inline.hpp"
  32 #include "gc_implementation/g1/heapRegionRemSet.hpp"
  33 #include "gc_implementation/g1/heapRegionManager.inline.hpp"
  34 #include "gc_implementation/shared/liveRange.hpp"
  35 #include "memory/genOopClosures.inline.hpp"
  36 #include "memory/iterator.hpp"
  37 #include "memory/space.inline.hpp"
  38 #include "oops/oop.inline.hpp"
  39 #include "runtime/orderAccess.inline.hpp"
  40 
  41 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
  42 
  43 int    HeapRegion::LogOfHRGrainBytes = 0;
  44 int    HeapRegion::LogOfHRGrainWords = 0;
  45 size_t HeapRegion::GrainBytes        = 0;
  46 size_t HeapRegion::GrainWords        = 0;
  47 size_t HeapRegion::CardsPerRegion    = 0;
  48 
  49 HeapRegionDCTOC::HeapRegionDCTOC(G1CollectedHeap* g1,
  50                                  HeapRegion* hr,
  51                                  G1ParPushHeapRSClosure* cl,
  52                                  CardTableModRefBS::PrecisionStyle precision) :
  53   DirtyCardToOopClosure(hr, cl, precision, NULL),


 200 void HeapRegion::calc_gc_efficiency() {
 201   // GC efficiency is the ratio of how much space would be
 202   // reclaimed over how long we predict it would take to reclaim it.
 203   G1CollectedHeap* g1h = G1CollectedHeap::heap();
 204   G1CollectorPolicy* g1p = g1h->g1_policy();
 205 
 206   // Retrieve a prediction of the elapsed time for this region for
 207   // a mixed gc because the region will only be evacuated during a
 208   // mixed gc.
 209   double region_elapsed_time_ms =
 210     g1p->predict_region_elapsed_time_ms(this, false /* for_young_gc */);
 211   _gc_efficiency = (double) reclaimable_bytes() / region_elapsed_time_ms;
 212 }
 213 
 214 void HeapRegion::set_startsHumongous(HeapWord* new_top, HeapWord* new_end) {
 215   assert(!isHumongous(), "sanity / pre-condition");
 216   assert(end() == _orig_end,
 217          "Should be normal before the humongous object allocation");
 218   assert(top() == bottom(), "should be empty");
 219   assert(bottom() <= new_top && new_top <= new_end, "pre-condition");
 220 
 221   _type.set_starts_humongous();
 222   _humongous_start_region = this;
 223 
 224   set_end(new_end);
 225   _offsets.set_for_starts_humongous(new_top);
 226 }
 227 
 228 void HeapRegion::set_continuesHumongous(HeapRegion* first_hr) {
 229   assert(!isHumongous(), "sanity / pre-condition");
 230   assert(end() == _orig_end,
 231          "Should be normal before the humongous object allocation");
 232   assert(top() == bottom(), "should be empty");
 233   assert(first_hr->startsHumongous(), "pre-condition");
 234 
 235   _type.set_continues_humongous();
 236   _humongous_start_region = first_hr;
 237 }
 238 
 239 void HeapRegion::clear_humongous() {
 240   assert(isHumongous(), "pre-condition");
 241 
 242   if (startsHumongous()) {
 243     assert(top() <= end(), "pre-condition");
 244     set_end(_orig_end);
 245     if (top() > end()) {
 246       // at least one "continues humongous" region after it
 247       set_top(end());
 248     }
 249   } else {
 250     // continues humongous
 251     assert(end() == _orig_end, "sanity");
 252   }
 253 
 254   assert(capacity() == HeapRegion::GrainBytes, "pre-condition");


 286     _rem_set(NULL), _recorded_rs_length(0), _predicted_elapsed_time_ms(0),
 287     _predicted_bytes_to_copy(0)
 288 {
 289   _rem_set = new HeapRegionRemSet(sharedOffsetArray, this);
 290   assert(HeapRegionRemSet::num_par_rem_sets() > 0, "Invariant.");
 291 
 292   initialize(mr);
 293 }
 294 
 295 void HeapRegion::initialize(MemRegion mr, bool clear_space, bool mangle_space) {
 296   assert(_rem_set->is_empty(), "Remembered set must be empty");
 297 
 298   G1OffsetTableContigSpace::initialize(mr, clear_space, mangle_space);
 299 
 300   _orig_end = mr.end();
 301   hr_clear(false /*par*/, false /*clear_space*/);
 302   set_top(bottom());
 303   record_timestamp();
 304 }
 305 
 306 CompactibleSpace* HeapRegion::next_compaction_space() const {
 307   return G1CollectedHeap::heap()->next_compaction_region(this);
 308 }
 309 
 310 void HeapRegion::note_self_forwarding_removal_start(bool during_initial_mark,
 311                                                     bool during_conc_mark) {
 312   // We always recreate the prev marking info and we'll explicitly
 313   // mark all objects we find to be self-forwarded on the prev
 314   // bitmap. So all objects need to be below PTAMS.
 315   _prev_marked_bytes = 0;
 316 
 317   if (during_initial_mark) {
 318     // During initial-mark, we'll also explicitly mark all objects
 319     // we find to be self-forwarded on the next bitmap. So all
 320     // objects need to be below NTAMS.
 321     _next_top_at_mark_start = top();
 322     _next_marked_bytes = 0;
 323   } else if (during_conc_mark) {
 324     // During concurrent mark, all objects in the CSet (including
 325     // the ones we find to be self-forwarded) are implicitly live.

New version (rev 8910 : full patch for jfr applied):

   1 /*
   2  * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "code/nmethod.hpp"
  27 #include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp"
  28 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
  29 #include "gc_implementation/g1/g1OopClosures.inline.hpp"
  30 #include "gc_implementation/g1/heapRegion.inline.hpp"
  31 #include "gc_implementation/g1/heapRegionBounds.inline.hpp"
  32 #include "gc_implementation/g1/heapRegionRemSet.hpp"
  33 #include "gc_implementation/g1/heapRegionManager.inline.hpp"
  34 #include "gc_implementation/g1/heapRegionTracer.hpp"
  35 #include "gc_implementation/shared/liveRange.hpp"
  36 #include "memory/genOopClosures.inline.hpp"
  37 #include "memory/iterator.hpp"
  38 #include "memory/space.inline.hpp"
  39 #include "oops/oop.inline.hpp"
  40 #include "runtime/orderAccess.inline.hpp"
  41 
  42 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
  43 
  44 int    HeapRegion::LogOfHRGrainBytes = 0;
  45 int    HeapRegion::LogOfHRGrainWords = 0;
  46 size_t HeapRegion::GrainBytes        = 0;
  47 size_t HeapRegion::GrainWords        = 0;
  48 size_t HeapRegion::CardsPerRegion    = 0;
  49 
  50 HeapRegionDCTOC::HeapRegionDCTOC(G1CollectedHeap* g1,
  51                                  HeapRegion* hr,
  52                                  G1ParPushHeapRSClosure* cl,
  53                                  CardTableModRefBS::PrecisionStyle precision) :
  54   DirtyCardToOopClosure(hr, cl, precision, NULL),


 201 void HeapRegion::calc_gc_efficiency() {
 202   // GC efficiency is the ratio of how much space would be
 203   // reclaimed over how long we predict it would take to reclaim it.
 204   G1CollectedHeap* g1h = G1CollectedHeap::heap();
 205   G1CollectorPolicy* g1p = g1h->g1_policy();
 206 
 207   // Retrieve a prediction of the elapsed time for this region for
 208   // a mixed gc because the region will only be evacuated during a
 209   // mixed gc.
 210   double region_elapsed_time_ms =
 211     g1p->predict_region_elapsed_time_ms(this, false /* for_young_gc */);
 212   _gc_efficiency = (double) reclaimable_bytes() / region_elapsed_time_ms;
 213 }
 214 
 215 void HeapRegion::set_startsHumongous(HeapWord* new_top, HeapWord* new_end) {
 216   assert(!isHumongous(), "sanity / pre-condition");
 217   assert(end() == _orig_end,
 218          "Should be normal before the humongous object allocation");
 219   assert(top() == bottom(), "should be empty");
 220   assert(bottom() <= new_top && new_top <= new_end, "pre-condition");
 221   if (EnableJFR) {
 222     report_region_type_change(G1HeapRegionTraceType::StartsHumongous);
 223   }
 224   _type.set_starts_humongous();
 225   _humongous_start_region = this;
 226 
 227   set_end(new_end);
 228   _offsets.set_for_starts_humongous(new_top);
 229 }
 230 
 231 void HeapRegion::set_continuesHumongous(HeapRegion* first_hr) {
 232   assert(!isHumongous(), "sanity / pre-condition");
 233   assert(end() == _orig_end,
 234          "Should be normal before the humongous object allocation");
 235   assert(top() == bottom(), "should be empty");
 236   assert(first_hr->startsHumongous(), "pre-condition");
 237   if (EnableJFR) {
 238     report_region_type_change(G1HeapRegionTraceType::ContinuesHumongous);
 239   }
 240   _type.set_continues_humongous();
 241   _humongous_start_region = first_hr;
 242 }
 243 
 244 void HeapRegion::clear_humongous() {
 245   assert(isHumongous(), "pre-condition");
 246 
 247   if (startsHumongous()) {
 248     assert(top() <= end(), "pre-condition");
 249     set_end(_orig_end);
 250     if (top() > end()) {
 251       // at least one "continues humongous" region after it
 252       set_top(end());
 253     }
 254   } else {
 255     // continues humongous
 256     assert(end() == _orig_end, "sanity");
 257   }
 258 
 259   assert(capacity() == HeapRegion::GrainBytes, "pre-condition");


 291     _rem_set(NULL), _recorded_rs_length(0), _predicted_elapsed_time_ms(0),
 292     _predicted_bytes_to_copy(0)
 293 {
 294   _rem_set = new HeapRegionRemSet(sharedOffsetArray, this);
 295   assert(HeapRegionRemSet::num_par_rem_sets() > 0, "Invariant.");
 296 
 297   initialize(mr);
 298 }
 299 
 300 void HeapRegion::initialize(MemRegion mr, bool clear_space, bool mangle_space) {
 301   assert(_rem_set->is_empty(), "Remembered set must be empty");
 302 
 303   G1OffsetTableContigSpace::initialize(mr, clear_space, mangle_space);
 304 
 305   _orig_end = mr.end();
 306   hr_clear(false /*par*/, false /*clear_space*/);
 307   set_top(bottom());
 308   record_timestamp();
 309 }
 310 
 311 void HeapRegion::report_region_type_change(G1HeapRegionTraceType::Type to) {
 312   assert(EnableJFR, "sanity check");
 313   HeapRegionTracer::send_region_type_change(_hrm_index,
 314                                             get_trace_type(),
 315                                             to,
 316                                             (uintptr_t)bottom(),
 317                                             used());
 318 }
 319 
 320 
 321 CompactibleSpace* HeapRegion::next_compaction_space() const {
 322   return G1CollectedHeap::heap()->next_compaction_region(this);
 323 }
 324 
 325 void HeapRegion::note_self_forwarding_removal_start(bool during_initial_mark,
 326                                                     bool during_conc_mark) {
 327   // We always recreate the prev marking info and we'll explicitly
 328   // mark all objects we find to be self-forwarded on the prev
 329   // bitmap. So all objects need to be below PTAMS.
 330   _prev_marked_bytes = 0;
 331 
 332   if (during_initial_mark) {
 333     // During initial-mark, we'll also explicitly mark all objects
 334     // we find to be self-forwarded on the next bitmap. So all
 335     // objects need to be below NTAMS.
 336     _next_top_at_mark_start = top();
 337     _next_marked_bytes = 0;
 338   } else if (during_conc_mark) {
 339     // During concurrent mark, all objects in the CSet (including
 340     // the ones we find to be self-forwarded) are implicitly live.


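The substance of the patch above is a guard-then-report pattern: each humongous type transition is preceded by an EnableJFR check and a call to HeapRegion::report_region_type_change, which forwards the region index, the old type, the new type, the region bottom, and the used bytes to HeapRegionTracer. The standalone sketch below is only an illustration of that pattern in isolation; all names in it (Region, RegionType, g_enable_jfr, report_type_change) are hypothetical and are not HotSpot code.

    // Standalone illustration of the guard-then-report pattern used by this patch.
    // Hypothetical names throughout; only the shape mirrors
    // HeapRegion::report_region_type_change and its call sites.
    #include <cstdint>
    #include <cstdio>

    enum class RegionType { Free, StartsHumongous, ContinuesHumongous };

    static bool g_enable_jfr = true;   // stands in for the EnableJFR flag

    struct Region {
      uint32_t   index;
      RegionType type;

      // Mirrors the shape of report_region_type_change: the transition is
      // reported (old type -> new type) before the state actually changes.
      void report_type_change(RegionType to) const {
        std::printf("region %u: %d -> %d\n",
                    (unsigned)index, (int)type, (int)to);
      }

      void set_starts_humongous() {
        if (g_enable_jfr) {                        // guard, as in the patch
          report_type_change(RegionType::StartsHumongous);
        }
        type = RegionType::StartsHumongous;        // state change after reporting
      }
    };

    int main() {
      Region r = {7, RegionType::Free};
      r.set_starts_humongous();
      return 0;
    }

As in the patch, the report is emitted before the type field is updated, so the tracer sees both the previous type (via get_trace_type() in the real code) and the type being set.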