/*
 * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "gc/shared/allocTracer.hpp"
#include "gc/shared/barrierSet.inline.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/gcWhen.hpp"
#include "gc/shared/vmGCOperations.hpp"
#include "logging/log.hpp"
#include "memory/metaspace.hpp"
#include "memory/resourceArea.hpp"
#include "oops/instanceMirrorKlass.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/init.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/threadSMR.hpp"
#include "services/heapDumper.hpp"
#include "utilities/align.hpp"


#ifdef ASSERT
int CollectedHeap::_fire_out_of_memory_count = 0;
#endif

size_t CollectedHeap::_filler_array_max_size = 0;

template <>
void EventLogBase<GCMessage>::print(outputStream* st, GCMessage& m) {
  st->print_cr("GC heap %s", m.is_before ? "before" : "after");
  st->print_raw(m);
}

void GCHeapLog::log_heap(CollectedHeap* heap, bool before) {
  if (!should_log()) {
    return;
  }

  double timestamp = fetch_timestamp();
  MutexLockerEx ml(&_mutex, Mutex::_no_safepoint_check_flag);
  int index = compute_log_index();
  _records[index].thread = NULL; // It's the GC thread, so it's not that interesting.
  _records[index].timestamp = timestamp;
  _records[index].data.is_before = before;
  stringStream st(_records[index].data.buffer(), _records[index].data.size());

  st.print_cr("{Heap %s GC invocations=%u (full %u):",
                 before ? "before" : "after",
                 heap->total_collections(),
                 heap->total_full_collections());

  heap->print_on(&st);
  st.print_cr("}");
}

VirtualSpaceSummary CollectedHeap::create_heap_space_summary() {
  size_t capacity_in_words = capacity() / HeapWordSize;

  return VirtualSpaceSummary(
    reserved_region().start(), reserved_region().start() + capacity_in_words, reserved_region().end());
}

GCHeapSummary CollectedHeap::create_heap_summary() {
  VirtualSpaceSummary heap_space = create_heap_space_summary();
  return GCHeapSummary(heap_space, used());
}

MetaspaceSummary CollectedHeap::create_metaspace_summary() {
  const MetaspaceSizes meta_space(
      MetaspaceAux::committed_bytes(),
      MetaspaceAux::used_bytes(),
      MetaspaceAux::reserved_bytes());
  const MetaspaceSizes data_space(
      MetaspaceAux::committed_bytes(Metaspace::NonClassType),
      MetaspaceAux::used_bytes(Metaspace::NonClassType),
      MetaspaceAux::reserved_bytes(Metaspace::NonClassType));
  const MetaspaceSizes class_space(
      MetaspaceAux::committed_bytes(Metaspace::ClassType),
      MetaspaceAux::used_bytes(Metaspace::ClassType),
      MetaspaceAux::reserved_bytes(Metaspace::ClassType));

  const MetaspaceChunkFreeListSummary& ms_chunk_free_list_summary =
    MetaspaceAux::chunk_free_list_summary(Metaspace::NonClassType);
  const MetaspaceChunkFreeListSummary& class_chunk_free_list_summary =
    MetaspaceAux::chunk_free_list_summary(Metaspace::ClassType);

  return MetaspaceSummary(MetaspaceGC::capacity_until_GC(), meta_space, data_space, class_space,
                          ms_chunk_free_list_summary, class_chunk_free_list_summary);
}

void CollectedHeap::print_heap_before_gc() {
  Universe::print_heap_before_gc();
  if (_gc_heap_log != NULL) {
    _gc_heap_log->log_heap_before(this);
  }
}

void CollectedHeap::print_heap_after_gc() {
  Universe::print_heap_after_gc();
  if (_gc_heap_log != NULL) {
    _gc_heap_log->log_heap_after(this);
  }
}

void CollectedHeap::print_on_error(outputStream* st) const {
  st->print_cr("Heap:");
  print_extended_on(st);
  st->cr();

  _barrier_set->print_on(st);
}

void CollectedHeap::trace_heap(GCWhen::Type when, const GCTracer* gc_tracer) {
  const GCHeapSummary& heap_summary = create_heap_summary();
  gc_tracer->report_gc_heap_summary(when, heap_summary);

  const MetaspaceSummary& metaspace_summary = create_metaspace_summary();
  gc_tracer->report_metaspace_summary(when, metaspace_summary);
}

void CollectedHeap::trace_heap_before_gc(const GCTracer* gc_tracer) {
  trace_heap(GCWhen::BeforeGC, gc_tracer);
}

void CollectedHeap::trace_heap_after_gc(const GCTracer* gc_tracer) {
  trace_heap(GCWhen::AfterGC, gc_tracer);
}

// WhiteBox API support for concurrent collectors.  These are the
// default implementations, for collectors which don't support this
// feature.
bool CollectedHeap::supports_concurrent_phase_control() const {
  return false;
}

const char* const* CollectedHeap::concurrent_phases() const {
  static const char* const result[] = { NULL };
  return result;
}

bool CollectedHeap::request_concurrent_phase(const char* phase) {
  return false;
}
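
// For illustration only: a collector that does support phase control would
// override all three defaults above. A minimal, hypothetical sketch (the
// class and phase names below are made up):
//
//   bool MyConcurrentHeap::supports_concurrent_phase_control() const {
//     return true;
//   }
//
//   const char* const* MyConcurrentHeap::concurrent_phases() const {
//     static const char* const phases[] = { "MARK", "SWEEP", NULL };
//     return phases;  // NULL-terminated, like the default above
//   }
//
//   bool MyConcurrentHeap::request_concurrent_phase(const char* phase) {
//     // Wait until the named phase is reached and return true;
//     // return false for an unrecognized phase name.
//   }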

// Memory state functions.


CollectedHeap::CollectedHeap() :
  _barrier_set(NULL),
  _is_gc_active(false),
  _total_collections(0),
  _total_full_collections(0),
  _gc_cause(GCCause::_no_gc),
  _gc_lastcause(GCCause::_no_gc)
{
  const size_t max_len = size_t(arrayOopDesc::max_array_length(T_INT));
  const size_t elements_per_word = HeapWordSize / sizeof(jint);
  _filler_array_max_size = align_object_size(filler_array_hdr_size() +
                                             max_len / elements_per_word);
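  // Worked example (illustrative): on a typical 64-bit VM, HeapWordSize is 8
  // and sizeof(jint) is 4, so elements_per_word is 2. max_len is close to
  // max_jint (2^31 - 1), so the payload contributes about 2^30 heap words;
  // with the array header added and the object-size alignment applied, the
  // filler-array limit comes out at roughly 1G words (about 8GB). The exact
  // value depends on the platform's header size and alignment.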

  NOT_PRODUCT(_promotion_failure_alot_count = 0;)
  NOT_PRODUCT(_promotion_failure_alot_gc_number = 0;)

  if (UsePerfData) {
    EXCEPTION_MARK;

    // create the gc cause jvmstat counters
    _perf_gc_cause = PerfDataManager::create_string_variable(SUN_GC, "cause",
                             80, GCCause::to_string(_gc_cause), CHECK);

    _perf_gc_lastcause =
                PerfDataManager::create_string_variable(SUN_GC, "lastCause",
                             80, GCCause::to_string(_gc_lastcause), CHECK);
  }

  // Create the ring log
  if (LogEvents) {
    _gc_heap_log = new GCHeapLog();
  } else {
    _gc_heap_log = NULL;
  }
}

// This interface assumes that it's being called by the
// vm thread. It collects the heap assuming that the
// heap lock is already held and that we are executing in
// the context of the vm thread.
void CollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
  assert(Thread::current()->is_VM_thread(), "Precondition#1");
  assert(Heap_lock->is_locked(), "Precondition#2");
  GCCauseSetter gcs(this, cause);
  switch (cause) {
    case GCCause::_heap_inspection:
    case GCCause::_heap_dump:
    case GCCause::_metadata_GC_threshold : {
      HandleMark hm;
      do_full_collection(false);        // don't clear all soft refs
      break;
    }
    case GCCause::_metadata_GC_clear_soft_refs: {
      HandleMark hm;
      do_full_collection(true);         // do clear all soft refs
      break;
    }
    default:
      ShouldNotReachHere(); // Unexpected use of this function
  }
}
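
// For reference, VM_GC_HeapInspection is one such caller: it runs on the VM
// thread at a safepoint and (via its collect() helper) reaches this method
// roughly as follows (simplified sketch; see vmGCOperations.cpp for the real
// code, which also checks GCLocker):
//
//   Universe::heap()->collect_as_vm_thread(GCCause::_heap_inspection);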

void CollectedHeap::set_barrier_set(BarrierSet* barrier_set) {
  _barrier_set = barrier_set;
  BarrierSet::set_bs(barrier_set);
}

#ifndef PRODUCT
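// In debug builds with ZapUnusedHeapArea enabled, free heap space is filled
// with the badHeapWordVal pattern. The two checks below are the two sides of
// that contract: freshly initialized allocations must no longer contain the
// pattern, and not-yet-initialized space must still carry it.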
void CollectedHeap::check_for_bad_heap_word_value(HeapWord* addr, size_t size) {
  if (CheckMemoryInitialization && ZapUnusedHeapArea) {
    for (size_t slot = 0; slot < size; slot += 1) {
      assert((*(intptr_t*) (addr + slot)) != ((intptr_t) badHeapWordVal),
             "Found badHeapWordValue in post-allocation check");
    }
  }
}

void CollectedHeap::check_for_non_bad_heap_word_value(HeapWord* addr, size_t size) {
  if (CheckMemoryInitialization && ZapUnusedHeapArea) {
    for (size_t slot = 0; slot < size; slot += 1) {
      assert((*(intptr_t*) (addr + slot)) == ((intptr_t) badHeapWordVal),
             "Found non badHeapWordValue in pre-allocation check");
    }
  }
}
#endif // PRODUCT

#ifdef ASSERT
void CollectedHeap::check_for_valid_allocation_state() {
  Thread *thread = Thread::current();
  // How to choose between a pending exception and a potential
  // OutOfMemoryError?  Don't allow pending exceptions.
  // This is a VM policy failure, so how do we exhaustively test it?
  assert(!thread->has_pending_exception(),
         "shouldn't be allocating with pending exception");
  if (StrictSafepointChecks) {
    assert(thread->allow_allocation(),
           "Allocation done by thread for which allocation is blocked "
           "by No_Allocation_Verifier!");
    // Allocation of an oop can always invoke a safepoint,
    // hence, the true argument
    thread->check_for_valid_safepoint_state(true);
  }
}
#endif

HeapWord* CollectedHeap::allocate_from_tlab_slow(Klass* klass, Thread* thread, size_t size) {

  // Retain tlab and allocate object in shared space if
  // the amount free in the tlab is too large to discard.
  if (thread->tlab().free() > thread->tlab().refill_waste_limit()) {
    thread->tlab().record_slow_allocation(size);
    return NULL;
  }

  // Discard tlab and allocate a new one.
  // To minimize fragmentation, the last TLAB may be smaller than the rest.
  size_t new_tlab_size = thread->tlab().compute_size(size);

  thread->tlab().clear_before_allocation();

  if (new_tlab_size == 0) {
    return NULL;
  }

  // Allocate a new TLAB...
  HeapWord* obj = Universe::heap()->allocate_new_tlab(new_tlab_size);
  if (obj == NULL) {
    return NULL;
  }

  AllocTracer::send_allocation_in_new_tlab(klass, obj, new_tlab_size * HeapWordSize, size * HeapWordSize, thread);

  if (ZeroTLAB) {
    // ...and clear it.
    Copy::zero_to_words(obj, new_tlab_size);
  } else {
    // ...and zap just allocated object.
#ifdef ASSERT
    // Skip mangling the space corresponding to the object header to
    // ensure that the returned space is not considered parsable by
    // any concurrent GC thread.
    size_t hdr_size = oopDesc::header_size();
    Copy::fill_to_words(obj + hdr_size, new_tlab_size - hdr_size, badHeapWordVal);
#endif // ASSERT
  }
  thread->tlab().fill(obj, obj + size, new_tlab_size);
  return obj;
}
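
// A worked example of the retain-vs-discard decision above (numbers are
// illustrative): with the default TLABRefillWasteFraction of 64, a thread
// whose desired TLAB size is 256KB starts with a refill waste limit of about
// 4KB (256KB / 64). If more than that is still free in the current TLAB, the
// TLAB is retained and the request falls through to a shared-space
// allocation; otherwise the remainder is discarded (filled with a dummy
// object) and a fresh TLAB is allocated. The limit also adapts: each slow
// allocation recorded against a retained TLAB raises it.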

size_t CollectedHeap::max_tlab_size() const {
  // TLABs can't be bigger than we can fill with an int[Integer.MAX_VALUE].
  // This restriction could be removed by enabling filling with multiple arrays.
  // If we compute that the reasonable way as
  //    header_size + ((sizeof(jint) * max_jint) / HeapWordSize)
  // we'll overflow on the multiply, so we do the divide first.
  // We actually lose a little by dividing first,
  // but that just makes the TLAB somewhat smaller than the biggest array,
  // which is fine, since we'll be able to fill that.
  size_t max_int_size = typeArrayOopDesc::header_size(T_INT) +
              sizeof(jint) *
              ((juint) max_jint / (size_t) HeapWordSize);
  return align_down(max_int_size, MinObjAlignment);
}
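
// Worked example of the divide-first computation above for a 64-bit VM
// (HeapWordSize == 8): max_jint / 8 is 268435455 words, and multiplying by
// sizeof(jint) == 4 gives 1073741820 payload words, versus the 1073741823
// words that max_jint ints would actually occupy. Dividing first therefore
// costs only 3 words here, while keeping the intermediate value from
// overflowing.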

size_t CollectedHeap::filler_array_hdr_size() {
  return align_object_offset(arrayOopDesc::header_size(T_INT)); // align to Long
}

size_t CollectedHeap::filler_array_min_size() {
  return align_object_size(filler_array_hdr_size()); // align to MinObjAlignment
}

#ifdef ASSERT
void CollectedHeap::fill_args_check(HeapWord* start, size_t words)
{
  assert(words >= min_fill_size(), "too small to fill");
  assert(is_object_aligned(words), "unaligned size");
  assert(Universe::heap()->is_in_reserved(start), "not in heap");
  assert(Universe::heap()->is_in_reserved(start + words - 1), "not in heap");
}

void CollectedHeap::zap_filler_array(HeapWord* start, size_t words, bool zap)
{
  if (ZapFillerObjects && zap) {
    Copy::fill_to_words(start + filler_array_hdr_size(),
                        words - filler_array_hdr_size(), 0xDEAFBABE);
  }
}
#endif // ASSERT

void
CollectedHeap::fill_with_array(HeapWord* start, size_t words, bool zap)
{
  assert(words >= filler_array_min_size(), "too small for an array");
  assert(words <= filler_array_max_size(), "too big for a single object");

  const size_t payload_size = words - filler_array_hdr_size();
  const size_t len = payload_size * HeapWordSize / sizeof(jint);
  assert((int)len >= 0, "size too large " SIZE_FORMAT " becomes %d", words, (int)len);

  // Set the length first for concurrent GC.
  ((arrayOop)start)->set_length((int)len);
  post_allocation_setup_common(Universe::intArrayKlassObj(), start);
  DEBUG_ONLY(zap_filler_array(start, words, zap);)
}
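
// Worked example for fill_with_array() above (the header size is an
// assumption for illustration; filler_array_hdr_size() supplies the real
// one): to fill 1024 heap words on a 64-bit VM with a 2-word int-array
// header, the payload is 1022 words, which encodes as an int[] length of
// 1022 * 8 / 4 == 2044 elements.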

void
CollectedHeap::fill_with_object_impl(HeapWord* start, size_t words, bool zap)
{
  assert(words <= filler_array_max_size(), "too big for a single object");

  if (words >= filler_array_min_size()) {
    fill_with_array(start, words, zap);
  } else if (words > 0) {
    assert(words == min_fill_size(), "unaligned size");
    post_allocation_setup_common(SystemDictionary::Object_klass(), start);
  }
}

void CollectedHeap::fill_with_object(HeapWord* start, size_t words, bool zap)
{
  DEBUG_ONLY(fill_args_check(start, words);)
  HandleMark hm;  // Free handles before leaving.
  fill_with_object_impl(start, words, zap);
}

void CollectedHeap::fill_with_objects(HeapWord* start, size_t words, bool zap)
{
  DEBUG_ONLY(fill_args_check(start, words);)
  HandleMark hm;  // Free handles before leaving.

  // Multiple objects may be required depending on the filler array maximum size. Fill
  // the range up to that with objects that are filler_array_max_size sized. The
  // remainder is filled with a single object.
  const size_t min = min_fill_size();
  const size_t max = filler_array_max_size();
  while (words > max) {
    const size_t cur = (words - max) >= min ? max : max - min;
    fill_with_array(start, cur, zap);
    start += cur;
    words -= cur;
  }

  fill_with_object_impl(start, words, zap);
}
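
// Worked example of the splitting loop above: let max be
// filler_array_max_size() and min be min_fill_size(), with min > 1. If
// words == max + 1, carving off a full max-sized array would strand a single
// word, too small to fill. Since (words - max) < min, the loop instead carves
// off max - min words, leaving min + 1 words -- a fillable remainder -- for
// the final fill_with_object_impl() call.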

HeapWord* CollectedHeap::allocate_new_tlab(size_t size) {
  guarantee(false, "thread-local allocation buffers not supported");
  return NULL;
}

void CollectedHeap::ensure_parsability(bool retire_tlabs) {
  // The second disjunct in the assertion below makes a concession
  // for the start-up verification done while the VM is being
  // created. Callers must be careful to ensure that mutators
  // aren't going to interfere -- for instance, this is permissible
  // if we are still single-threaded and have either not yet
  // started allocating (nothing much to verify) or we have
  // started allocating but are now a full-fledged JavaThread
  // (and have thus made our TLABs available for filling).
  assert(SafepointSynchronize::is_at_safepoint() ||
         !is_init_completed(),
         "Should only be called at a safepoint or at start-up,"
         " otherwise concurrent mutator activity may make the heap"
         " unparsable again");
  const bool use_tlab = UseTLAB;
  // The main thread starts allocating via a TLAB even before it
  // has added itself to the threads list at vm boot-up.
  JavaThreadIteratorWithHandle jtiwh;
  assert(!use_tlab || jtiwh.length() > 0,
         "Attempt to fill tlabs before main thread has been added"
         " to threads list is doomed to failure!");
  BarrierSet *bs = barrier_set();
  for (; JavaThread *thread = jtiwh.next(); ) {
     if (use_tlab) thread->tlab().make_parsable(retire_tlabs);
     bs->make_parsable(thread);
  }
}

void CollectedHeap::accumulate_statistics_all_tlabs() {
  if (UseTLAB) {
    assert(SafepointSynchronize::is_at_safepoint() ||
         !is_init_completed(),
         "should only accumulate statistics on tlabs at safepoint");

    ThreadLocalAllocBuffer::accumulate_statistics_before_gc();
  }
}

void CollectedHeap::resize_all_tlabs() {
  if (UseTLAB) {
    assert(SafepointSynchronize::is_at_safepoint() ||
         !is_init_completed(),
         "should only resize tlabs at safepoint");

    ThreadLocalAllocBuffer::resize_all_tlabs();
  }
}

void CollectedHeap::full_gc_dump(GCTimer* timer, bool before) {
  assert(timer != NULL, "timer is null");
  if ((HeapDumpBeforeFullGC && before) || (HeapDumpAfterFullGC && !before)) {
    GCTraceTime(Info, gc) tm(before ? "Heap Dump (before full gc)" : "Heap Dump (after full gc)", timer);
    HeapDumper::dump_heap();
  }

  LogTarget(Trace, gc, classhisto) lt;
  if (lt.is_enabled()) {
    GCTraceTime(Trace, gc, classhisto) tm(before ? "Class Histogram (before full gc)" : "Class Histogram (after full gc)", timer);
    ResourceMark rm;
    LogStream ls(lt);
    VM_GC_HeapInspection inspector(&ls, false /* ! full gc */);
    inspector.doit();
  }
}
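
// Both behaviors above are opt-in. An illustrative command line enabling the
// heap dumps and the class histogram logging:
//
//   java -XX:+HeapDumpBeforeFullGC -XX:+HeapDumpAfterFullGC \
//        -Xlog:gc+classhisto*=trace ...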

void CollectedHeap::pre_full_gc_dump(GCTimer* timer) {
  full_gc_dump(timer, true);
}

void CollectedHeap::post_full_gc_dump(GCTimer* timer) {
  full_gc_dump(timer, false);
}

void CollectedHeap::initialize_reserved_region(HeapWord *start, HeapWord *end) {
  // It is important to do this in a way such that concurrent readers can't
  // temporarily think something is in the heap.  (Seen this happen in asserts.)
  // Setting the word size to zero first means a racing reader observes an
  // empty region rather than a half-updated one.
  _reserved.set_word_size(0);
  _reserved.set_start(start);
  _reserved.set_end(end);
}

void CollectedHeap::post_initialize() {
  initialize_serviceability();
}