< prev index next >

src/hotspot/share/gc/epsilon/epsilonHeap.cpp

Print this page
rev 53302 : 8217014: Epsilon should not ignore Metadata GC causes
Reviewed-by: stuefe


 // NOTE(review): webrev excerpt — the head of EpsilonHeap::allocate_work
 // (expansion/TLAB sizing logic on earlier lines) is elided by the diff view.
 144     }
 145 
 146     _space->set_end((HeapWord *) _virtual_space.high());
 147     res = _space->par_allocate(size);
 148   }
 149 
 150   size_t used = _space->used();   // racy snapshot; only feeds the reporting below
 151 
 152   // Allocation successful, update counters
 153   {
 154     size_t last = _last_counter_update;
     // CAS guarantees a single winner per step: only the thread whose observed
     // "last" still matches publishes the counter update.
 155     if ((used - last >= _step_counter_update) && Atomic::cmpxchg(used, &_last_counter_update, last) == last) {
 156       _monitoring_support->update_counters();
 157     }
 158   }
 159 
 160   // ...and print the occupancy line, if needed
 161   {
 162     size_t last = _last_heap_print;
 163     if ((used - last >= _step_heap_print) && Atomic::cmpxchg(used, &_last_heap_print, last) == last) {
 164       log_info(gc)("Heap: " SIZE_FORMAT "M reserved, " SIZE_FORMAT "M (%.2f%%) committed, " SIZE_FORMAT "M (%.2f%%) used",
 165                    max_capacity() / M,
 166                    capacity() / M,
 167                    capacity() * 100.0 / max_capacity(),
 168                    used / M,
 169                    used * 100.0 / max_capacity());
 170     }
 171   }
 172 
 173   assert(is_object_aligned(res), "Object should be aligned: " PTR_FORMAT, p2i(res));
 174   return res;
 175 }
 176 
 // Allocates a new TLAB. With EpsilonElasticTLAB, the granted size is driven
 // by per-thread ergonomics read/written via EpsilonThreadLocalData.
 177 HeapWord* EpsilonHeap::allocate_new_tlab(size_t min_size,
 178                                          size_t requested_size,
 179                                          size_t* actual_size) {
 180   Thread* thread = Thread::current();
 181 
 182   // Defaults in case elastic paths are not taken
 183   bool fits = true;
 184   size_t size = requested_size;
 185   size_t ergo_tlab = requested_size;
 186   int64_t time = 0;
 187 
 188   if (EpsilonElasticTLAB) {
 189     ergo_tlab = EpsilonThreadLocalData::ergo_tlab_size(thread);
 
 
 // NOTE(review): webrev excerpt — lines 190-250 (elastic sizing and the actual
 // allocation producing "res") are elided by the diff view.
 251     if (EpsilonElasticTLAB && !fits) {
 252       // If we requested expansion, this is our new ergonomic TLAB size
 253       EpsilonThreadLocalData::set_ergo_tlab_size(thread, size);
 254     }
 255   } else {
 256     // Allocation failed, reset ergonomics to try and fit smaller TLABs
 257     if (EpsilonElasticTLAB) {
 258       EpsilonThreadLocalData::set_ergo_tlab_size(thread, 0);
 259     }
 260   }
 261 
 262   return res;
 263 }
 264 
 // Non-TLAB allocation entry point. Epsilon never collects, so the overhead
 // flag is unconditionally cleared before delegating to allocate_work().
 265 HeapWord* EpsilonHeap::mem_allocate(size_t size, bool *gc_overhead_limit_was_exceeded) {
 266   *gc_overhead_limit_was_exceeded = false;
 267   return allocate_work(size);
 268 }
 269 
 // Old revision: every GC cause, including metadata-threshold causes, is
 // ignored; only the monitoring counters are refreshed (the cause-specific
 // handling is what JDK-8217014 adds).
 270 void EpsilonHeap::collect(GCCause::Cause cause) {
 
 
 
 
 
 
 
 
 
 
 
 
 
 271   log_info(gc)("GC request for \"%s\" is ignored", GCCause::to_string(cause));
 
 272   _monitoring_support->update_counters();
 273 }
 274 
 // Full-GC requests are likewise ignored; clear_all_soft_refs is irrelevant
 // since nothing is ever collected.
 275 void EpsilonHeap::do_full_collection(bool clear_all_soft_refs) {
 276   log_info(gc)("Full GC request for \"%s\" is ignored", GCCause::to_string(gc_cause()));
 277   _monitoring_support->update_counters();
 278 }
 279 
 // Iterates all objects by delegating to the single allocation space.
 280 void EpsilonHeap::safe_object_iterate(ObjectClosure *cl) {
 281   _space->safe_object_iterate(cl);
 282 }
 283 
 284 void EpsilonHeap::print_on(outputStream *st) const {
 285   st->print_cr("Epsilon Heap");
 286 
 287   // Cast away constness:
     // NOTE(review): the C-style value cast copies _virtual_space to shed
     // const — presumably VirtualSpace::print_on is non-const; confirm.
 288   ((VirtualSpace)_virtual_space).print_on(st);
 289 
 290   st->print_cr("Allocation space:");
 291   _space->print_on(st);
 
 
 292 }
 293 
 // Prints lifetime allocation totals at VM exit. Since Epsilon never reclaims,
 // used() equals total bytes ever allocated.
 294 void EpsilonHeap::print_tracing_info() const {
 295   Log(gc) log;
 296   size_t allocated_kb = used() / K;
 297   log.info("Total allocated: " SIZE_FORMAT " KB",
 298            allocated_kb);
     // NOTE(review): assumes os::elapsed_counter() ticks in nanoseconds and is
     // nonzero by the time this runs — TODO confirm against os::elapsed_frequency().
 299   log.info("Average allocation rate: " SIZE_FORMAT " KB/sec",
 300            (size_t)(allocated_kb * NANOSECS_PER_SEC / os::elapsed_counter()));
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 301 }


 // NOTE(review): webrev excerpt — the head of EpsilonHeap::allocate_work is
 // elided by the diff view. New revision: inline occupancy logging replaced by
 // the print_heap_info()/print_metaspace_info() helpers.
 144     }
 145 
 146     _space->set_end((HeapWord *) _virtual_space.high());
 147     res = _space->par_allocate(size);
 148   }
 149 
 150   size_t used = _space->used();   // racy snapshot; only feeds the reporting below
 151 
 152   // Allocation successful, update counters
 153   {
 154     size_t last = _last_counter_update;
     // CAS guarantees a single winner per step: only the thread whose observed
     // "last" still matches publishes the counter update.
 155     if ((used - last >= _step_counter_update) && Atomic::cmpxchg(used, &_last_counter_update, last) == last) {
 156       _monitoring_support->update_counters();
 157     }
 158   }
 159 
 160   // ...and print the occupancy line, if needed
 161   {
 162     size_t last = _last_heap_print;
 163     if ((used - last >= _step_heap_print) && Atomic::cmpxchg(used, &_last_heap_print, last) == last) {
 164       print_heap_info(used);
 165       print_metaspace_info();
 
 
 
 
 166     }
 167   }
 168 
 169   assert(is_object_aligned(res), "Object should be aligned: " PTR_FORMAT, p2i(res));
 170   return res;
 171 }
 172 
 // Allocates a new TLAB. With EpsilonElasticTLAB, the granted size is driven
 // by per-thread ergonomics read/written via EpsilonThreadLocalData.
 173 HeapWord* EpsilonHeap::allocate_new_tlab(size_t min_size,
 174                                          size_t requested_size,
 175                                          size_t* actual_size) {
 176   Thread* thread = Thread::current();
 177 
 178   // Defaults in case elastic paths are not taken
 179   bool fits = true;
 180   size_t size = requested_size;
 181   size_t ergo_tlab = requested_size;
 182   int64_t time = 0;
 183 
 184   if (EpsilonElasticTLAB) {
 185     ergo_tlab = EpsilonThreadLocalData::ergo_tlab_size(thread);
 
 
 // NOTE(review): webrev excerpt — lines 186-246 (elastic sizing and the actual
 // allocation producing "res") are elided by the diff view.
 247     if (EpsilonElasticTLAB && !fits) {
 248       // If we requested expansion, this is our new ergonomic TLAB size
 249       EpsilonThreadLocalData::set_ergo_tlab_size(thread, size);
 250     }
 251   } else {
 252     // Allocation failed, reset ergonomics to try and fit smaller TLABs
 253     if (EpsilonElasticTLAB) {
 254       EpsilonThreadLocalData::set_ergo_tlab_size(thread, 0);
 255     }
 256   }
 257 
 258   return res;
 259 }
 260 
 // Non-TLAB allocation entry point. Epsilon never collects, so the overhead
 // flag is unconditionally cleared before delegating to allocate_work().
 261 HeapWord* EpsilonHeap::mem_allocate(size_t size, bool *gc_overhead_limit_was_exceeded) {
 262   *gc_overhead_limit_was_exceeded = false;
 263   return allocate_work(size);
 264 }
 265 
 // New revision (JDK-8217014): metadata GC causes now trigger metaspace
 // resizing; all other causes remain ignored.
 266 void EpsilonHeap::collect(GCCause::Cause cause) {
 267   switch (cause) {
 268     case GCCause::_metadata_GC_threshold:
 269     case GCCause::_metadata_GC_clear_soft_refs:
 270       // Receiving these causes means the VM itself entered the safepoint for metadata collection.
 271       // While Epsilon does not do GC, it has to perform sizing adjustments, otherwise we would
 272       // re-enter the safepoint again very soon.
 273 
 274       assert(SafepointSynchronize::is_at_safepoint(), "Expected at safepoint");
 275       log_info(gc)("GC request for \"%s\" is handled", GCCause::to_string(cause));
 276       MetaspaceGC::compute_new_size();
 277       print_metaspace_info();
 278       break;
 279     default:
 280       log_info(gc)("GC request for \"%s\" is ignored", GCCause::to_string(cause));
 281   }
     // Monitoring counters are refreshed on every request, handled or not.
 282   _monitoring_support->update_counters();
 283 }
 284 
 // Full-GC requests route through collect() so metadata causes get the same
 // sizing treatment; clear_all_soft_refs is irrelevant since nothing is collected.
 285 void EpsilonHeap::do_full_collection(bool clear_all_soft_refs) {
 286   collect(gc_cause());
 
 287 }
 288 
 // Iterates all objects by delegating to the single allocation space.
 289 void EpsilonHeap::safe_object_iterate(ObjectClosure *cl) {
 290   _space->safe_object_iterate(cl);
 291 }
 292 
 293 void EpsilonHeap::print_on(outputStream *st) const {
 294   st->print_cr("Epsilon Heap");
 295 
 296   // Cast away constness:
     // NOTE(review): the C-style value cast copies _virtual_space to shed
     // const — presumably VirtualSpace::print_on is non-const; confirm.
 297   ((VirtualSpace)_virtual_space).print_on(st);
 298 
 299   st->print_cr("Allocation space:");
 300   _space->print_on(st);
 301 
     // New in this revision: metaspace occupancy joins the heap report.
 302   MetaspaceUtils::print_on(st);
 303 }
 304 
 // Exit-time summary, now delegating to the shared occupancy helpers instead
 // of hand-rolled logging. used() equals total bytes ever allocated, as
 // Epsilon never reclaims.
 305 void EpsilonHeap::print_tracing_info() const {
 306   print_heap_info(used());
 307   print_metaspace_info();
 308 }
 309 
 // Logs one heap occupancy line: reserved/committed/used in MB with percentages
 // of reserved.
 310 void EpsilonHeap::print_heap_info(size_t used) const {
 311   size_t reserved  = max_capacity();
 312   size_t committed = capacity();
 313 
     // Guard keeps the percentage math from dividing by zero when no reliable
     // reserved size is available.
 314   if (reserved != 0) {
 315     log_info(gc)("Heap: " SIZE_FORMAT "M reserved, " SIZE_FORMAT "M (%.2f%%) committed, " SIZE_FORMAT "M (%.2f%%) used",
 316             reserved / M,
 317             committed / M,
 318             committed * 100.0 / reserved,
 319             used / M,
 320             used * 100.0 / reserved);
 321   } else {
 322     log_info(gc)("Heap: no reliable data");
 323   }
 324 }
 325 
 // Logs one metaspace occupancy line on the gc+metaspace channel, mirroring
 // print_heap_info() but sourced from MetaspaceUtils.
 326 void EpsilonHeap::print_metaspace_info() const {
 327   size_t reserved  = MetaspaceUtils::reserved_bytes();
 328   size_t committed = MetaspaceUtils::committed_bytes();
 329   size_t used      = MetaspaceUtils::used_bytes();
 330 
     // Guard keeps the percentage math from dividing by zero when no reliable
     // reserved size is available.
 331   if (reserved != 0) {
 332     log_info(gc, metaspace)("Metaspace: " SIZE_FORMAT "M reserved, " SIZE_FORMAT "M (%.2f%%) committed, " SIZE_FORMAT "M (%.2f%%) used",
 333             reserved / M,
 334             committed / M,
 335             committed * 100.0 / reserved,
 336             used / M,
 337             used * 100.0 / reserved);
 338   } else {
 339     log_info(gc, metaspace)("Metaspace: no reliable data");
 340   }
 341 }
< prev index next >