/*
 * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "gc/shared/allocTracer.hpp"
#include "gc/shared/barrierSet.inline.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcLocker.inline.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/gcWhen.hpp"
#include "gc/shared/vmGCOperations.hpp"
#include "logging/log.hpp"
#include "memory/metaspace.hpp"
#include "memory/resourceArea.hpp"
#include "oops/instanceMirrorKlass.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/init.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/threadSMR.hpp"
#include "runtime/vmThread.hpp"
#include "services/heapDumper.hpp"
#include "utilities/align.hpp"

class ClassLoaderData;

#ifdef ASSERT
int CollectedHeap::_fire_out_of_memory_count = 0;
#endif

size_t CollectedHeap::_filler_array_max_size = 0;

template <>
void EventLogBase<GCMessage>::print(outputStream* st, GCMessage& m) {
  st->print_cr("GC heap %s", m.is_before ? "before" : "after");
  st->print_raw(m);
}

void GCHeapLog::log_heap(CollectedHeap* heap, bool before) {
  if (!should_log()) {
    return;
  }

  double timestamp = fetch_timestamp();
  MutexLockerEx ml(&_mutex, Mutex::_no_safepoint_check_flag);
  int index = compute_log_index();
  _records[index].thread = NULL; // It's the GC thread, so it's not that interesting.
  _records[index].timestamp = timestamp;
  _records[index].data.is_before = before;
  stringStream st(_records[index].data.buffer(), _records[index].data.size());

  st.print_cr("{Heap %s GC invocations=%u (full %u):",
                 before ? "before" : "after",
                 heap->total_collections(),
                 heap->total_full_collections());

  heap->print_on(&st);
  st.print_cr("}");
}

VirtualSpaceSummary CollectedHeap::create_heap_space_summary() {
  size_t capacity_in_words = capacity() / HeapWordSize;

  return VirtualSpaceSummary(
    reserved_region().start(), reserved_region().start() + capacity_in_words, reserved_region().end());
}

GCHeapSummary CollectedHeap::create_heap_summary() {
  VirtualSpaceSummary heap_space = create_heap_space_summary();
  return GCHeapSummary(heap_space, used());
}

MetaspaceSummary CollectedHeap::create_metaspace_summary() {
  const MetaspaceSizes meta_space(
      MetaspaceAux::committed_bytes(),
      MetaspaceAux::used_bytes(),
      MetaspaceAux::reserved_bytes());
  const MetaspaceSizes data_space(
      MetaspaceAux::committed_bytes(Metaspace::NonClassType),
      MetaspaceAux::used_bytes(Metaspace::NonClassType),
      MetaspaceAux::reserved_bytes(Metaspace::NonClassType));
  const MetaspaceSizes class_space(
      MetaspaceAux::committed_bytes(Metaspace::ClassType),
      MetaspaceAux::used_bytes(Metaspace::ClassType),
      MetaspaceAux::reserved_bytes(Metaspace::ClassType));

  const MetaspaceChunkFreeListSummary& ms_chunk_free_list_summary =
    MetaspaceAux::chunk_free_list_summary(Metaspace::NonClassType);
  const MetaspaceChunkFreeListSummary& class_chunk_free_list_summary =
    MetaspaceAux::chunk_free_list_summary(Metaspace::ClassType);

  return MetaspaceSummary(MetaspaceGC::capacity_until_GC(), meta_space, data_space, class_space,
                          ms_chunk_free_list_summary, class_chunk_free_list_summary);
}

void CollectedHeap::print_heap_before_gc() {
  Universe::print_heap_before_gc();
  if (_gc_heap_log != NULL) {
    _gc_heap_log->log_heap_before(this);
  }
}

void CollectedHeap::print_heap_after_gc() {
  Universe::print_heap_after_gc();
  if (_gc_heap_log != NULL) {
    _gc_heap_log->log_heap_after(this);
  }
}

void CollectedHeap::print_on_error(outputStream* st) const {
  st->print_cr("Heap:");
  print_extended_on(st);
  st->cr();

  _barrier_set->print_on(st);
}

void CollectedHeap::trace_heap(GCWhen::Type when, const GCTracer* gc_tracer) {
  const GCHeapSummary& heap_summary = create_heap_summary();
  gc_tracer->report_gc_heap_summary(when, heap_summary);

  const MetaspaceSummary& metaspace_summary = create_metaspace_summary();
  gc_tracer->report_metaspace_summary(when, metaspace_summary);
}

void CollectedHeap::trace_heap_before_gc(const GCTracer* gc_tracer) {
  trace_heap(GCWhen::BeforeGC, gc_tracer);
}

void CollectedHeap::trace_heap_after_gc(const GCTracer* gc_tracer) {
  trace_heap(GCWhen::AfterGC, gc_tracer);
}

// WhiteBox API support for concurrent collectors.  These are the
// default implementations, for collectors which don't support this
// feature.
bool CollectedHeap::supports_concurrent_phase_control() const {
  return false;
}

const char* const* CollectedHeap::concurrent_phases() const {
  static const char* const result[] = { NULL };
  return result;
}

bool CollectedHeap::request_concurrent_phase(const char* phase) {
  return false;
}

// Memory state functions.


CollectedHeap::CollectedHeap() :
  _barrier_set(NULL),
  _is_gc_active(false),
  _total_collections(0),
  _total_full_collections(0),
  _gc_cause(GCCause::_no_gc),
  _gc_lastcause(GCCause::_no_gc)
{
  const size_t max_len = size_t(arrayOopDesc::max_array_length(T_INT));
  const size_t elements_per_word = HeapWordSize / sizeof(jint);
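  // The largest filler object is an int array of max_array_length(T_INT)
  // elements. On a 64-bit VM, for example, HeapWordSize == 8, so
  // elements_per_word == 2 and the payload of a max-length int array is
  // max_len / 2 heap words, on top of the aligned array header.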
  _filler_array_max_size = align_object_size(filler_array_hdr_size() +
                                             max_len / elements_per_word);

  NOT_PRODUCT(_promotion_failure_alot_count = 0;)
  NOT_PRODUCT(_promotion_failure_alot_gc_number = 0;)

  if (UsePerfData) {
    EXCEPTION_MARK;

    // create the gc cause jvmstat counters
    _perf_gc_cause = PerfDataManager::create_string_variable(SUN_GC, "cause",
                             80, GCCause::to_string(_gc_cause), CHECK);

    _perf_gc_lastcause =
                PerfDataManager::create_string_variable(SUN_GC, "lastCause",
                             80, GCCause::to_string(_gc_lastcause), CHECK);
  }

  // Create the ring log
  if (LogEvents) {
    _gc_heap_log = new GCHeapLog();
  } else {
    _gc_heap_log = NULL;
  }
}

// This interface assumes that it's being called by the
// vm thread. It collects the heap assuming that the
// heap lock is already held and that we are executing in
// the context of the vm thread.
void CollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
  assert(Thread::current()->is_VM_thread(), "Precondition#1");
  assert(Heap_lock->is_locked(), "Precondition#2");
  GCCauseSetter gcs(this, cause);
  switch (cause) {
    case GCCause::_heap_inspection:
    case GCCause::_heap_dump:
    case GCCause::_metadata_GC_threshold : {
      HandleMark hm;
      do_full_collection(false);        // don't clear all soft refs
      break;
    }
    case GCCause::_metadata_GC_clear_soft_refs: {
      HandleMark hm;
      do_full_collection(true);         // do clear all soft refs
      break;
    }
    default:
      ShouldNotReachHere(); // Unexpected use of this function
  }
}

MetaWord* CollectedHeap::satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
                                                            size_t word_size,
                                                            Metaspace::MetadataType mdtype) {
  uint loop_count = 0;
  uint gc_count = 0;
  uint full_gc_count = 0;

  assert(!Heap_lock->owned_by_self(), "Should not be holding the Heap_lock");
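  // Allocation protocol: try a plain metaspace allocation first; if the
  // GCLocker is active, try to expand and allocate (stalling until JNI
  // critical sections are exited, unless this thread is in one itself);
  // otherwise snapshot the collection counts under the Heap_lock and schedule
  // a VM_CollectForMetadataAllocation operation, retrying if the GC was
  // locked out or the operation's prologue failed.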

  do {
    MetaWord* result = loader_data->metaspace_non_null()->allocate(word_size, mdtype);
    if (result != NULL) {
      return result;
    }

    if (GCLocker::is_active_and_needs_gc()) {
      // If the GCLocker is active, just expand and allocate.
      // If that does not succeed, wait if this thread is not
      // in a critical section itself.
      result = loader_data->metaspace_non_null()->expand_and_allocate(word_size, mdtype);
      if (result != NULL) {
        return result;
      }
      JavaThread* jthr = JavaThread::current();
      if (!jthr->in_critical()) {
        // Wait for JNI critical section to be exited
        GCLocker::stall_until_clear();
        // The GC invoked by the last thread leaving the critical
        // section will be a young collection, and a full collection
        // is (currently) needed for unloading classes, so continue
        // to the next iteration to get a full GC.
        continue;
      } else {
        if (CheckJNICalls) {
          fatal("Possible deadlock due to allocating while"
                " in jni critical section");
        }
        return NULL;
      }
    }

    {  // Need the lock to get self-consistent gc counts
      MutexLocker ml(Heap_lock);
      gc_count      = Universe::heap()->total_collections();
      full_gc_count = Universe::heap()->total_full_collections();
    }

    // Generate a VM operation
    VM_CollectForMetadataAllocation op(loader_data,
                                       word_size,
                                       mdtype,
                                       gc_count,
                                       full_gc_count,
                                       GCCause::_metadata_GC_threshold);
    VMThread::execute(&op);

    // If GC was locked out, try again. Check this before checking for success
    // because the prologue could have succeeded while the GC was still locked out.
    if (op.gc_locked()) {
      continue;
    }

    if (op.prologue_succeeded()) {
      return op.result();
    }
    loop_count++;
    if ((QueuedAllocationWarningCount > 0) &&
        (loop_count % QueuedAllocationWarningCount == 0)) {
      log_warning(gc, ergo)("satisfy_failed_metadata_allocation() retried %u times,"
                            " size=" SIZE_FORMAT, loop_count, word_size);
    }
  } while (true);  // Until a GC is done
}

void CollectedHeap::set_barrier_set(BarrierSet* barrier_set) {
  _barrier_set = barrier_set;
  BarrierSet::set_bs(barrier_set);
}

#ifndef PRODUCT
void CollectedHeap::check_for_bad_heap_word_value(HeapWord* addr, size_t size) {
  if (CheckMemoryInitialization && ZapUnusedHeapArea) {
    for (size_t slot = 0; slot < size; slot += 1) {
      assert((*(intptr_t*) (addr + slot)) != ((intptr_t) badHeapWordVal),
             "Found badHeapWordValue in post-allocation check");
    }
  }
}

void CollectedHeap::check_for_non_bad_heap_word_value(HeapWord* addr, size_t size) {
  if (CheckMemoryInitialization && ZapUnusedHeapArea) {
    for (size_t slot = 0; slot < size; slot += 1) {
      assert((*(intptr_t*) (addr + slot)) == ((intptr_t) badHeapWordVal),
             "Found non badHeapWordValue in pre-allocation check");
    }
  }
}
#endif // PRODUCT

#ifdef ASSERT
void CollectedHeap::check_for_valid_allocation_state() {
  Thread *thread = Thread::current();
  // How to choose between a pending exception and a potential
  // OutOfMemoryError?  Don't allow pending exceptions.
  // This is a VM policy failure, so how do we exhaustively test it?
  assert(!thread->has_pending_exception(),
         "shouldn't be allocating with pending exception");
  if (StrictSafepointChecks) {
    assert(thread->allow_allocation(),
           "Allocation done by thread for which allocation is blocked "
           "by No_Allocation_Verifier!");
    // Allocation of an oop can always invoke a safepoint,
    // hence, the true argument
    thread->check_for_valid_safepoint_state(true);
  }
}
#endif

HeapWord* CollectedHeap::allocate_from_tlab_slow(Klass* klass, Thread* thread, size_t size) {

  // Retain the tlab and allocate the object in shared space if
  // the amount free in the tlab is too large to discard.
  if (thread->tlab().free() > thread->tlab().refill_waste_limit()) {
    thread->tlab().record_slow_allocation(size);
    return NULL;
  }

  // Discard the tlab and allocate a new one.
  // To minimize fragmentation, the last TLAB may be smaller than the rest.
  size_t new_tlab_size = thread->tlab().compute_size(size);

  thread->tlab().clear_before_allocation();

  if (new_tlab_size == 0) {
    return NULL;
  }

  // Allocate a new TLAB...
  HeapWord* obj = Universe::heap()->allocate_new_tlab(new_tlab_size);
  if (obj == NULL) {
    return NULL;
  }

  AllocTracer::send_allocation_in_new_tlab(klass, obj, new_tlab_size * HeapWordSize, size * HeapWordSize, thread);

  if (ZeroTLAB) {
    // ...and clear it.
    Copy::zero_to_words(obj, new_tlab_size);
  } else {
    // ...and zap the just-allocated object.
#ifdef ASSERT
    // Skip mangling the space corresponding to the object header to
    // ensure that the returned space is not considered parsable by
    // any concurrent GC thread.
    size_t hdr_size = oopDesc::header_size();
    Copy::fill_to_words(obj + hdr_size, new_tlab_size - hdr_size, badHeapWordVal);
#endif // ASSERT
  }
  thread->tlab().fill(obj, obj + size, new_tlab_size);
  return obj;
}

size_t CollectedHeap::max_tlab_size() const {
  // TLABs can't be bigger than what we can fill with an int[Integer.MAX_VALUE].
  // This restriction could be removed by enabling filling with multiple arrays.
  // If we computed that the obvious way as
  //    header_size + ((sizeof(jint) * max_jint) / HeapWordSize)
  // we'd overflow on the multiply, so we do the divide first.
  // We actually lose a little by dividing first, but that just makes the
  // TLAB somewhat smaller than the biggest array, which is fine, since
  // we'll be able to fill that.
  size_t max_int_size = typeArrayOopDesc::header_size(T_INT) +
              sizeof(jint) *
              ((juint) max_jint / (size_t) HeapWordSize);
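  // For example, on a 32-bit VM (where size_t is 32 bits) the naive product
  // sizeof(jint) * max_jint == 4 * (2^31 - 1) == 2^33 - 4, which does not fit
  // in 32 bits; dividing max_jint by HeapWordSize first keeps every
  // intermediate value in range.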
  return align_down(max_int_size, MinObjAlignment);
}

size_t CollectedHeap::filler_array_hdr_size() {
  return align_object_offset(arrayOopDesc::header_size(T_INT)); // align to Long
}

size_t CollectedHeap::filler_array_min_size() {
  return align_object_size(filler_array_hdr_size()); // align to MinObjAlignment
}

#ifdef ASSERT
void CollectedHeap::fill_args_check(HeapWord* start, size_t words)
{
  assert(words >= min_fill_size(), "too small to fill");
  assert(is_object_aligned(words), "unaligned size");
  assert(Universe::heap()->is_in_reserved(start), "not in heap");
  assert(Universe::heap()->is_in_reserved(start + words - 1), "not in heap");
}

void CollectedHeap::zap_filler_array(HeapWord* start, size_t words, bool zap)
{
  if (ZapFillerObjects && zap) {
    Copy::fill_to_words(start + filler_array_hdr_size(),
                        words - filler_array_hdr_size(), 0xDEAFBABE);
  }
}
#endif // ASSERT

void
CollectedHeap::fill_with_array(HeapWord* start, size_t words, bool zap)
{
  assert(words >= filler_array_min_size(), "too small for an array");
  assert(words <= filler_array_max_size(), "too big for a single object");

  const size_t payload_size = words - filler_array_hdr_size();
  const size_t len = payload_size * HeapWordSize / sizeof(jint);
  assert((int)len >= 0, "size too large " SIZE_FORMAT " becomes %d", words, (int)len);

  // Set the length first for concurrent GC.
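  // (The length is written before the klass is installed by
  // post_allocation_setup_common(), so a concurrent reader that can already
  // see the klass can always compute the object's size from the length.)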
  ((arrayOop)start)->set_length((int)len);
  post_allocation_setup_common(Universe::intArrayKlassObj(), start);
  DEBUG_ONLY(zap_filler_array(start, words, zap);)
}

void
CollectedHeap::fill_with_object_impl(HeapWord* start, size_t words, bool zap)
{
  assert(words <= filler_array_max_size(), "too big for a single object");

  if (words >= filler_array_min_size()) {
    fill_with_array(start, words, zap);
  } else if (words > 0) {
    assert(words == min_fill_size(), "unaligned size");
    post_allocation_setup_common(SystemDictionary::Object_klass(), start);
  }
}

void CollectedHeap::fill_with_object(HeapWord* start, size_t words, bool zap)
{
  DEBUG_ONLY(fill_args_check(start, words);)
  HandleMark hm;  // Free handles before leaving.
  fill_with_object_impl(start, words, zap);
}

void CollectedHeap::fill_with_objects(HeapWord* start, size_t words, bool zap)
{
  DEBUG_ONLY(fill_args_check(start, words);)
  HandleMark hm;  // Free handles before leaving.

  // Multiple objects may be required depending on the filler array maximum size:
  // fill the range with objects of filler_array_max_size words each, then fill
  // the remainder with a single, possibly smaller, object.
  const size_t min = min_fill_size();
  const size_t max = filler_array_max_size();
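  // Each iteration carves off a max-sized array unless that would leave a
  // remainder smaller than min; in that case carve off max - min instead so
  // the tail stays fillable. For illustration, with min == 2 and max == 10
  // (made-up values), filling 21 words yields pieces of 10, then 8
  // (since 11 - 10 < 2), leaving 3 words for the final object.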
  while (words > max) {
    const size_t cur = (words - max) >= min ? max : max - min;
    fill_with_array(start, cur, zap);
    start += cur;
    words -= cur;
  }

  fill_with_object_impl(start, words, zap);
}

HeapWord* CollectedHeap::allocate_new_tlab(size_t size) {
  guarantee(false, "thread-local allocation buffers not supported");
  return NULL;
}

void CollectedHeap::ensure_parsability(bool retire_tlabs) {
  // The second disjunct in the assertion below makes a concession
  // for the start-up verification done while the VM is being
  // created. Callers must ensure that mutators aren't going to
  // interfere -- for instance, this is permissible if we are still
  // single-threaded and have either not yet started allocating
  // (nothing much to verify) or we have started allocating but are
  // now a full-fledged JavaThread (and have thus made our TLABs
  // available for filling).
  assert(SafepointSynchronize::is_at_safepoint() ||
         !is_init_completed(),
         "Should only be called at a safepoint or at start-up,"
         " otherwise concurrent mutator activity may make the heap"
         " unparsable again");
  const bool use_tlab = UseTLAB;
  // The main thread starts allocating via a TLAB even before it
  // has added itself to the threads list at vm boot-up.
  JavaThreadIteratorWithHandle jtiwh;
  assert(!use_tlab || jtiwh.length() > 0,
         "Attempt to fill tlabs before main thread has been added"
         " to threads list is doomed to failure!");
  BarrierSet *bs = barrier_set();
  for (; JavaThread *thread = jtiwh.next(); ) {
    if (use_tlab) thread->tlab().make_parsable(retire_tlabs);
    bs->make_parsable(thread);
  }
}

void CollectedHeap::accumulate_statistics_all_tlabs() {
  if (UseTLAB) {
    assert(SafepointSynchronize::is_at_safepoint() ||
         !is_init_completed(),
         "should only accumulate statistics on tlabs at safepoint");

    ThreadLocalAllocBuffer::accumulate_statistics_before_gc();
  }
}

void CollectedHeap::resize_all_tlabs() {
  if (UseTLAB) {
    assert(SafepointSynchronize::is_at_safepoint() ||
         !is_init_completed(),
         "should only resize tlabs at safepoint");

    ThreadLocalAllocBuffer::resize_all_tlabs();
  }
}

void CollectedHeap::full_gc_dump(GCTimer* timer, bool before) {
  assert(timer != NULL, "timer is null");
  if ((HeapDumpBeforeFullGC && before) || (HeapDumpAfterFullGC && !before)) {
    GCTraceTime(Info, gc) tm(before ? "Heap Dump (before full gc)" : "Heap Dump (after full gc)", timer);
    HeapDumper::dump_heap();
  }

  LogTarget(Trace, gc, classhisto) lt;
  if (lt.is_enabled()) {
    GCTraceTime(Trace, gc, classhisto) tm(before ? "Class Histogram (before full gc)" : "Class Histogram (after full gc)", timer);
    ResourceMark rm;
    LogStream ls(lt);
    VM_GC_HeapInspection inspector(&ls, false /* ! full gc */);
    inspector.doit();
  }
}

void CollectedHeap::pre_full_gc_dump(GCTimer* timer) {
  full_gc_dump(timer, true);
}

void CollectedHeap::post_full_gc_dump(GCTimer* timer) {
  full_gc_dump(timer, false);
}

void CollectedHeap::initialize_reserved_region(HeapWord *start, HeapWord *end) {
  // It is important to do this in a way such that concurrent readers can't
  // temporarily think something is in the heap.  (Seen this happen in asserts.)
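  // Zeroing the word size first shrinks the region to empty before the new
  // bounds are published, so a racing reader sees either nothing or the
  // final region, never a half-updated one.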
  _reserved.set_word_size(0);
  _reserved.set_start(start);
  _reserved.set_end(end);
}

void CollectedHeap::post_initialize() {
  initialize_serviceability();
}