/*
 * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "gc/shared/allocTracer.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/gcLocker.inline.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/gcVMOperations.hpp"
#include "gc/shared/gcWhen.hpp"
#include "gc/shared/memAllocator.hpp"
#include "logging/log.hpp"
#include "memory/metaspace.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/instanceMirrorKlass.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/init.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/threadSMR.hpp"
#include "runtime/vmThread.hpp"
#include "services/heapDumper.hpp"
#include "utilities/align.hpp"
#include "utilities/copy.hpp"

class ClassLoaderData;

size_t CollectedHeap::_filler_array_max_size = 0;

template <>
void EventLogBase<GCMessage>::print(outputStream* st, GCMessage& m) {
  st->print_cr("GC heap %s", m.is_before ? "before" : "after");
  st->print_raw(m);
}

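// A record produced by log_heap() below looks roughly like the following
// (the counts are illustrative; the body comes from heap->print_on()):
//
//   {Heap before GC invocations=10 (full 2):
//    <collector-specific heap layout and usage>
//   }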
void GCHeapLog::log_heap(CollectedHeap* heap, bool before) {
  if (!should_log()) {
    return;
  }

  double timestamp = fetch_timestamp();
  MutexLocker ml(&_mutex, Mutex::_no_safepoint_check_flag);
  int index = compute_log_index();
  _records[index].thread = NULL; // It's the GC thread so it's not that interesting.
  _records[index].timestamp = timestamp;
  _records[index].data.is_before = before;
  stringStream st(_records[index].data.buffer(), _records[index].data.size());

  st.print_cr("{Heap %s GC invocations=%u (full %u):",
                 before ? "before" : "after",
                 heap->total_collections(),
                 heap->total_full_collections());

  heap->print_on(&st);
  st.print_cr("}");
}

size_t CollectedHeap::unused() const {
  MutexLocker ml(Heap_lock);
  return capacity() - used();
}

VirtualSpaceSummary CollectedHeap::create_heap_space_summary() {
  size_t capacity_in_words = capacity() / HeapWordSize;

  return VirtualSpaceSummary(
    _reserved.start(), _reserved.start() + capacity_in_words, _reserved.end());
}

GCHeapSummary CollectedHeap::create_heap_summary() {
  VirtualSpaceSummary heap_space = create_heap_space_summary();
  return GCHeapSummary(heap_space, used());
}

MetaspaceSummary CollectedHeap::create_metaspace_summary() {
  const MetaspaceSizes meta_space(
      MetaspaceUtils::committed_bytes(),
      MetaspaceUtils::used_bytes(),
      MetaspaceUtils::reserved_bytes());
  const MetaspaceSizes data_space(
      MetaspaceUtils::committed_bytes(Metaspace::NonClassType),
      MetaspaceUtils::used_bytes(Metaspace::NonClassType),
      MetaspaceUtils::reserved_bytes(Metaspace::NonClassType));
  const MetaspaceSizes class_space(
      MetaspaceUtils::committed_bytes(Metaspace::ClassType),
      MetaspaceUtils::used_bytes(Metaspace::ClassType),
      MetaspaceUtils::reserved_bytes(Metaspace::ClassType));

  const MetaspaceChunkFreeListSummary& ms_chunk_free_list_summary =
    MetaspaceUtils::chunk_free_list_summary(Metaspace::NonClassType);
  const MetaspaceChunkFreeListSummary& class_chunk_free_list_summary =
    MetaspaceUtils::chunk_free_list_summary(Metaspace::ClassType);

  return MetaspaceSummary(MetaspaceGC::capacity_until_GC(), meta_space, data_space, class_space,
                          ms_chunk_free_list_summary, class_chunk_free_list_summary);
}

void CollectedHeap::print_heap_before_gc() {
  Universe::print_heap_before_gc();
  if (_gc_heap_log != NULL) {
    _gc_heap_log->log_heap_before(this);
  }
}

void CollectedHeap::print_heap_after_gc() {
  Universe::print_heap_after_gc();
  if (_gc_heap_log != NULL) {
    _gc_heap_log->log_heap_after(this);
  }
}

void CollectedHeap::print() const { print_on(tty); }

void CollectedHeap::print_on_error(outputStream* st) const {
  st->print_cr("Heap:");
  print_extended_on(st);
  st->cr();

  BarrierSet::barrier_set()->print_on(st);
}

void CollectedHeap::trace_heap(GCWhen::Type when, const GCTracer* gc_tracer) {
  const GCHeapSummary& heap_summary = create_heap_summary();
  gc_tracer->report_gc_heap_summary(when, heap_summary);

  const MetaspaceSummary& metaspace_summary = create_metaspace_summary();
  gc_tracer->report_metaspace_summary(when, metaspace_summary);
}

void CollectedHeap::trace_heap_before_gc(const GCTracer* gc_tracer) {
  trace_heap(GCWhen::BeforeGC, gc_tracer);
}

void CollectedHeap::trace_heap_after_gc(const GCTracer* gc_tracer) {
  trace_heap(GCWhen::AfterGC, gc_tracer);
}

// WhiteBox API support for concurrent collectors.  These are the
// default implementations, for collectors which don't support this
// feature.
bool CollectedHeap::supports_concurrent_phase_control() const {
  return false;
}

bool CollectedHeap::request_concurrent_phase(const char* phase) {
  return false;
}

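// Sanity check used by assertions: an oop is considered plausible only if it
// is properly aligned, lies within the heap, and its klass pointer does not
// point back into the Java heap. Klass structures live outside the Java heap
// (in metaspace), so a "klass" field that points into the heap is almost
// certainly garbage.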
bool CollectedHeap::is_oop(oop object) const {
  if (!is_object_aligned(object)) {
    return false;
  }

  if (!is_in(object)) {
    return false;
  }

  if (is_in(object->klass_or_null())) {
    return false;
  }

  return true;
}

// Memory state functions.


CollectedHeap::CollectedHeap() :
  _is_gc_active(false),
  _total_collections(0),
  _total_full_collections(0),
  _gc_cause(GCCause::_no_gc),
  _gc_lastcause(GCCause::_no_gc)
{
  const size_t max_len = size_t(arrayOopDesc::max_array_length(T_INT));
  const size_t elements_per_word = HeapWordSize / sizeof(jint);
  _filler_array_max_size = align_object_size(filler_array_hdr_size() +
                                             max_len / elements_per_word);
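  // Illustrative sizing, assuming a typical 64-bit VM: HeapWordSize == 8 and
  // sizeof(jint) == 4, so elements_per_word == 2 and the cap works out to
  // roughly max_len / 2 heap words plus the aligned array header.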

  NOT_PRODUCT(_promotion_failure_alot_count = 0;)
  NOT_PRODUCT(_promotion_failure_alot_gc_number = 0;)

  if (UsePerfData) {
    EXCEPTION_MARK;

    // create the gc cause jvmstat counters
    _perf_gc_cause = PerfDataManager::create_string_variable(SUN_GC, "cause",
                             80, GCCause::to_string(_gc_cause), CHECK);

    _perf_gc_lastcause =
                PerfDataManager::create_string_variable(SUN_GC, "lastCause",
                             80, GCCause::to_string(_gc_lastcause), CHECK);
  }

  // Create the ring log
  if (LogEvents) {
    _gc_heap_log = new GCHeapLog();
  } else {
    _gc_heap_log = NULL;
  }
}

// This interface assumes that it's being called by the
// vm thread. It collects the heap assuming that the
// heap lock is already held and that we are executing in
// the context of the vm thread.
void CollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
  assert(Thread::current()->is_VM_thread(), "Precondition#1");
  assert(Heap_lock->is_locked(), "Precondition#2");
  GCCauseSetter gcs(this, cause);
  switch (cause) {
    case GCCause::_heap_inspection:
    case GCCause::_heap_dump:
    case GCCause::_metadata_GC_threshold : {
      HandleMark hm;
      do_full_collection(false);        // don't clear all soft refs
      break;
    }
    case GCCause::_metadata_GC_clear_soft_refs: {
      HandleMark hm;
      do_full_collection(true);         // do clear all soft refs
      break;
    }
    default:
      ShouldNotReachHere(); // Unexpected use of this function
  }
}

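// Satisfy a failed metadata allocation, retrying after GCs when needed. The
// loop below: (1) retries a plain metaspace allocation; (2) if the GCLocker
// is active, tries expand_and_allocate and then either stalls until the JNI
// critical sections clear or bails out if this thread is itself in one;
// (3) otherwise schedules a VM_CollectForMetadataAllocation and retries,
// warning every QueuedAllocationWarningCount iterations.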
MetaWord* CollectedHeap::satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
                                                            size_t word_size,
                                                            Metaspace::MetadataType mdtype) {
  uint loop_count = 0;
  uint gc_count = 0;
  uint full_gc_count = 0;

  assert(!Heap_lock->owned_by_self(), "Should not be holding the Heap_lock");

  do {
    MetaWord* result = loader_data->metaspace_non_null()->allocate(word_size, mdtype);
    if (result != NULL) {
      return result;
    }

    if (GCLocker::is_active_and_needs_gc()) {
      // If the GCLocker is active, just expand and allocate.
      // If that does not succeed, wait if this thread is not
      // in a critical section itself.
      result = loader_data->metaspace_non_null()->expand_and_allocate(word_size, mdtype);
      if (result != NULL) {
        return result;
      }
      JavaThread* jthr = JavaThread::current();
      if (!jthr->in_critical()) {
        // Wait for JNI critical section to be exited
        GCLocker::stall_until_clear();
        // The GC invoked by the last thread leaving the critical
        // section will be a young collection and a full collection
        // is (currently) needed for unloading classes so continue
        // to the next iteration to get a full GC.
        continue;
      } else {
        if (CheckJNICalls) {
          fatal("Possible deadlock due to allocating while"
                " in jni critical section");
        }
        return NULL;
      }
    }

    {  // Need the lock to get self-consistent gc_count values
      MutexLocker ml(Heap_lock);
      gc_count      = Universe::heap()->total_collections();
      full_gc_count = Universe::heap()->total_full_collections();
    }

    // Generate a VM operation
    VM_CollectForMetadataAllocation op(loader_data,
                                       word_size,
                                       mdtype,
                                       gc_count,
                                       full_gc_count,
                                       GCCause::_metadata_GC_threshold);
    VMThread::execute(&op);

    // If the GC was locked out, try again. Check this before checking for success,
    // because the prologue could have succeeded and the GC still been locked out.
    if (op.gc_locked()) {
      continue;
    }

    if (op.prologue_succeeded()) {
      return op.result();
    }
    loop_count++;
    if ((QueuedAllocationWarningCount > 0) &&
        (loop_count % QueuedAllocationWarningCount == 0)) {
      log_warning(gc, ergo)("satisfy_failed_metadata_allocation() retries %d times,"
                            " size=" SIZE_FORMAT, loop_count, word_size);
    }
  } while (true);  // Until a GC is done
}

MemoryUsage CollectedHeap::memory_usage() {
  return MemoryUsage(InitialHeapSize, used(), capacity(), max_capacity());
}


#ifndef PRODUCT
void CollectedHeap::check_for_non_bad_heap_word_value(HeapWord* addr, size_t size) {
  if (CheckMemoryInitialization && ZapUnusedHeapArea) {
    // Note the mismatch between size (in 32/64-bit heap words) and ju_addr, which always points to a 32-bit word.
    for (juint* ju_addr = reinterpret_cast<juint*>(addr); ju_addr < reinterpret_cast<juint*>(addr + size); ++ju_addr) {
       assert(*ju_addr == badHeapWordVal, "Found non badHeapWordValue in pre-allocation check");
    }
  }
}
#endif // PRODUCT

size_t CollectedHeap::max_tlab_size() const {
  // TLABs can't be bigger than we can fill with an int[Integer.MAX_VALUE].
  // This restriction could be removed by enabling filling with multiple arrays.
  // If we compute that the obvious way as
  //    header_size + ((sizeof(jint) * max_jint) / HeapWordSize)
  // we'll overflow on the multiply, so we do the divide first.
  // We actually lose a little by dividing first,
  // but that just makes the TLAB somewhat smaller than the biggest array,
  // which is fine, since we'll be able to fill that.
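  // Illustrative 64-bit numbers (not normative): max_jint / HeapWordSize ==
  // 2147483647 / 8 == 268435455, and multiplying by sizeof(jint) == 4 gives
  // 1073741820 words, only 3 words short of the 1073741823 words we would
  // get by multiplying first.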
  size_t max_int_size = typeArrayOopDesc::header_size(T_INT) +
              sizeof(jint) *
              ((juint) max_jint / (size_t) HeapWordSize);
  return align_down(max_int_size, MinObjAlignment);
}

size_t CollectedHeap::filler_array_hdr_size() {
  return align_object_offset(arrayOopDesc::header_size(T_INT)); // align to Long
}

size_t CollectedHeap::filler_array_min_size() {
  return align_object_size(filler_array_hdr_size()); // align to MinObjAlignment
}

#ifdef ASSERT
void CollectedHeap::fill_args_check(HeapWord* start, size_t words)
{
  assert(words >= min_fill_size(), "too small to fill");
  assert(is_object_aligned(words), "unaligned size");
}

void CollectedHeap::zap_filler_array(HeapWord* start, size_t words, bool zap)
{
  if (ZapFillerObjects && zap) {
    Copy::fill_to_words(start + filler_array_hdr_size(),
                        words - filler_array_hdr_size(), 0XDEAFBABE);
  }
}
#endif // ASSERT

void
CollectedHeap::fill_with_array(HeapWord* start, size_t words, bool zap)
{
  assert(words >= filler_array_min_size(), "too small for an array");
  assert(words <= filler_array_max_size(), "too big for a single object");

  const size_t payload_size = words - filler_array_hdr_size();
  const size_t len = payload_size * HeapWordSize / sizeof(jint);
  assert((int)len >= 0, "size too large " SIZE_FORMAT " becomes %d", words, (int)len);

  ObjArrayAllocator allocator(Universe::intArrayKlassObj(), words, (int)len, /* do_zero */ false);
  allocator.initialize(start);
  DEBUG_ONLY(zap_filler_array(start, words, zap);)
}

void
CollectedHeap::fill_with_object_impl(HeapWord* start, size_t words, bool zap)
{
  assert(words <= filler_array_max_size(), "too big for a single object");

  if (words >= filler_array_min_size()) {
    fill_with_array(start, words, zap);
  } else if (words > 0) {
    assert(words == min_fill_size(), "unaligned size");
    ObjAllocator allocator(SystemDictionary::Object_klass(), words);
    allocator.initialize(start);
  }
}

void CollectedHeap::fill_with_object(HeapWord* start, size_t words, bool zap)
{
  DEBUG_ONLY(fill_args_check(start, words);)
  HandleMark hm;  // Free handles before leaving.
  fill_with_object_impl(start, words, zap);
}

void CollectedHeap::fill_with_objects(HeapWord* start, size_t words, bool zap)
{
  DEBUG_ONLY(fill_args_check(start, words);)
  HandleMark hm;  // Free handles before leaving.

  // Multiple objects may be required depending on the filler array maximum size. Fill
  // the range up to that limit with filler_array_max_size-sized objects; the
  // remainder is filled with a single object.
  const size_t min = min_fill_size();
  const size_t max = filler_array_max_size();
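  // If carving off a full max-sized chunk would leave a tail smaller than the
  // minimum fill size, take max - min instead, so the remainder passed to
  // fill_with_object_impl() below is always large enough to fill.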
  while (words > max) {
    const size_t cur = (words - max) >= min ? max : max - min;
    fill_with_array(start, cur, zap);
    start += cur;
    words -= cur;
  }

  fill_with_object_impl(start, words, zap);
}

void CollectedHeap::fill_with_dummy_object(HeapWord* start, HeapWord* end, bool zap) {
  CollectedHeap::fill_with_object(start, end, zap);
}

size_t CollectedHeap::min_dummy_object_size() const {
  return oopDesc::header_size();
}

size_t CollectedHeap::tlab_alloc_reserve() const {
  size_t min_size = min_dummy_object_size();
  return min_size > (size_t)MinObjAlignment ? align_object_size(min_size) : 0;
}

HeapWord* CollectedHeap::allocate_new_tlab(size_t min_size,
                                           size_t requested_size,
                                           size_t* actual_size) {
  guarantee(false, "thread-local allocation buffers not supported");
  return NULL;
}

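// Make the heap parsable before it is walked at a safepoint. Roughly: the
// unused tail of every thread's TLAB is filled (and, when retire_tlabs is
// true, the TLABs are also retired and their statistics folded into the
// global TLAB stats), so heap iteration sees a contiguous sequence of
// well-formed objects.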
void CollectedHeap::ensure_parsability(bool retire_tlabs) {
  assert(SafepointSynchronize::is_at_safepoint() || !is_init_completed(),
         "Should only be called at a safepoint or at start-up");

  ThreadLocalAllocStats stats;

  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *thread = jtiwh.next();) {
    BarrierSet::barrier_set()->make_parsable(thread);
    if (UseTLAB) {
      if (retire_tlabs) {
        thread->tlab().retire(&stats);
      } else {
        thread->tlab().make_parsable();
      }
    }
  }

  stats.publish();
}

void CollectedHeap::resize_all_tlabs() {
  assert(SafepointSynchronize::is_at_safepoint() || !is_init_completed(),
         "Should only resize tlabs at safepoint");

  if (UseTLAB && ResizeTLAB) {
    for (JavaThreadIteratorWithHandle jtiwh; JavaThread *thread = jtiwh.next(); ) {
      thread->tlab().resize();
    }
  }
}

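// Optionally dump the heap and log a class histogram around a full GC. The
// dumps are gated by the HeapDumpBeforeFullGC / HeapDumpAfterFullGC flags;
// the histogram is emitted when the gc+classhisto tags are enabled at trace
// level (e.g. with something like -Xlog:gc+classhisto*=trace).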
void CollectedHeap::full_gc_dump(GCTimer* timer, bool before) {
  assert(timer != NULL, "timer is null");
  if ((HeapDumpBeforeFullGC && before) || (HeapDumpAfterFullGC && !before)) {
    GCTraceTime(Info, gc) tm(before ? "Heap Dump (before full gc)" : "Heap Dump (after full gc)", timer);
    HeapDumper::dump_heap();
  }

  LogTarget(Trace, gc, classhisto) lt;
  if (lt.is_enabled()) {
    GCTraceTime(Trace, gc, classhisto) tm(before ? "Class Histogram (before full gc)" : "Class Histogram (after full gc)", timer);
    ResourceMark rm;
    LogStream ls(lt);
    VM_GC_HeapInspection inspector(&ls, false /* ! full gc */);
    inspector.doit();
  }
}

void CollectedHeap::pre_full_gc_dump(GCTimer* timer) {
  full_gc_dump(timer, true);
}

void CollectedHeap::post_full_gc_dump(GCTimer* timer) {
  full_gc_dump(timer, false);
}

void CollectedHeap::initialize_reserved_region(const ReservedHeapSpace& rs) {
  // It is important to do this in a way such that concurrent readers can't
  // temporarily think something is in the heap.  (Seen this happen in asserts.)
  _reserved.set_word_size(0);
  _reserved.set_start((HeapWord*)rs.base());
  _reserved.set_end((HeapWord*)rs.end());
}

void CollectedHeap::post_initialize() {
  initialize_serviceability();
}

#ifndef PRODUCT

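// Test support: with the develop-time flag PromotionFailureALot set, a
// promotion failure is simulated once PromotionFailureALotInterval collections
// have passed since the last reset, and then on every
// PromotionFailureALotCount-th query (see the counter handling below).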
bool CollectedHeap::promotion_should_fail(volatile size_t* count) {
  // Access to count is not atomic; the value does not have to be exact.
  if (PromotionFailureALot) {
    const size_t gc_num = total_collections();
    const size_t elapsed_gcs = gc_num - _promotion_failure_alot_gc_number;
    if (elapsed_gcs >= PromotionFailureALotInterval) {
      // Test for unsigned arithmetic wrap-around.
      if (++*count >= PromotionFailureALotCount) {
        *count = 0;
        return true;
      }
    }
  }
  return false;
}

bool CollectedHeap::promotion_should_fail() {
  return promotion_should_fail(&_promotion_failure_alot_count);
}

void CollectedHeap::reset_promotion_should_fail(volatile size_t* count) {
  if (PromotionFailureALot) {
    _promotion_failure_alot_gc_number = total_collections();
    *count = 0;
  }
}

void CollectedHeap::reset_promotion_should_fail() {
  reset_promotion_should_fail(&_promotion_failure_alot_count);
}

#endif  // #ifndef PRODUCT

bool CollectedHeap::supports_object_pinning() const {
  return false;
}

oop CollectedHeap::pin_object(JavaThread* thread, oop obj) {
  ShouldNotReachHere();
  return NULL;
}

void CollectedHeap::unpin_object(JavaThread* thread, oop obj) {
  ShouldNotReachHere();
}

void CollectedHeap::deduplicate_string(oop str) {
  // Do nothing, unless overridden in subclass.
}

size_t CollectedHeap::obj_size(oop obj) const {
  return obj->size();
}

uint32_t CollectedHeap::hash_oop(oop obj) const {
  const uintptr_t addr = cast_from_oop<uintptr_t>(obj);
  return static_cast<uint32_t>(addr >> LogMinObjAlignment);
}