11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "classfile/systemDictionary.hpp"
27 #include "gc/shared/allocTracer.hpp"
28 #include "gc/shared/barrierSet.hpp"
29 #include "gc/shared/collectedHeap.hpp"
30 #include "gc/shared/collectedHeap.inline.hpp"
31 #include "gc/shared/gcLocker.inline.hpp"
32 #include "gc/shared/gcHeapSummary.hpp"
33 #include "gc/shared/gcTrace.hpp"
34 #include "gc/shared/gcTraceTime.inline.hpp"
35 #include "gc/shared/gcWhen.hpp"
36 #include "gc/shared/memAllocator.hpp"
37 #include "gc/shared/vmGCOperations.hpp"
38 #include "logging/log.hpp"
39 #include "memory/metaspace.hpp"
40 #include "memory/resourceArea.hpp"
41 #include "oops/instanceMirrorKlass.hpp"
42 #include "oops/oop.inline.hpp"
43 #include "runtime/handles.inline.hpp"
44 #include "runtime/init.hpp"
45 #include "runtime/thread.inline.hpp"
46 #include "runtime/threadSMR.hpp"
47 #include "runtime/vmThread.hpp"
48 #include "services/heapDumper.hpp"
49 #include "utilities/align.hpp"
50 #include "utilities/copy.hpp"
51
52 class ClassLoaderData;
53
#ifdef ASSERT
// Debug-only counter for simulated out-of-memory injection; presumably read by
// fire_out_of_memory() — confirm against collectedHeap.hpp.
int CollectedHeap::_fire_out_of_memory_count = 0;
#endif

// Largest filler array size in words; computed once in the CollectedHeap
// constructor from the maximum int[] length.
size_t CollectedHeap::_filler_array_max_size = 0;
59
// Specialization for the GC heap event log: prints the before/after tag line,
// then the raw heap printout buffered in the GCMessage.
template <>
void EventLogBase<GCMessage>::print(outputStream* st, GCMessage& m) {
  st->print_cr("GC heap %s", m.is_before ? "before" : "after");
  st->print_raw(m);
}
65
66 void GCHeapLog::log_heap(CollectedHeap* heap, bool before) {
67 if (!should_log()) {
68 return;
69 }
70
71 double timestamp = fetch_timestamp();
72 MutexLockerEx ml(&_mutex, Mutex::_no_safepoint_check_flag);
73 int index = compute_log_index();
74 _records[index].thread = NULL; // Its the GC thread so it's not that interesting.
75 _records[index].timestamp = timestamp;
76 _records[index].data.is_before = before;
77 stringStream st(_records[index].data.buffer(), _records[index].data.size());
78
79 st.print_cr("{Heap %s GC invocations=%u (full %u):",
// Base-class default: this heap offers no requestable concurrent GC phases,
// so every request is refused. Collectors with concurrent phases presumably
// override this — confirm in collectedHeap.hpp.
bool CollectedHeap::request_concurrent_phase(const char* phase) {
  return false;
}
176
177 bool CollectedHeap::is_oop(oop object) const {
178 if (!check_obj_alignment(object)) {
179 return false;
180 }
181
182 if (!is_in_reserved(object)) {
183 return false;
184 }
185
186 if (is_in_reserved(object->klass_or_null())) {
187 return false;
188 }
189
190 return true;
191 }
192
193 // Memory state functions.
194
195
196 CollectedHeap::CollectedHeap() :
197 _is_gc_active(false),
198 _total_collections(0),
199 _total_full_collections(0),
200 _gc_cause(GCCause::_no_gc),
201 _gc_lastcause(GCCause::_no_gc)
202 {
203 const size_t max_len = size_t(arrayOopDesc::max_array_length(T_INT));
204 const size_t elements_per_word = HeapWordSize / sizeof(jint);
205 _filler_array_max_size = align_object_size(filler_array_hdr_size() +
206 max_len / elements_per_word);
207
208 NOT_PRODUCT(_promotion_failure_alot_count = 0;)
209 NOT_PRODUCT(_promotion_failure_alot_gc_number = 0;)
210
211 if (UsePerfData) {
212 EXCEPTION_MARK;
213
214 // create the gc cause jvmstat counters
215 _perf_gc_cause = PerfDataManager::create_string_variable(SUN_GC, "cause",
216 80, GCCause::to_string(_gc_cause), CHECK);
217
218 _perf_gc_lastcause =
219 PerfDataManager::create_string_variable(SUN_GC, "lastCause",
220 80, GCCause::to_string(_gc_lastcause), CHECK);
221 }
222
223 // Create the ring log
224 if (LogEvents) {
225 _gc_heap_log = new GCHeapLog();
226 } else {
340 assert((*(intptr_t*) (addr + slot)) == ((intptr_t) badHeapWordVal),
341 "Found non badHeapWordValue in pre-allocation check");
342 }
343 }
344 }
345 #endif // PRODUCT
346
347 size_t CollectedHeap::max_tlab_size() const {
348 // TLABs can't be bigger than we can fill with a int[Integer.MAX_VALUE].
349 // This restriction could be removed by enabling filling with multiple arrays.
350 // If we compute that the reasonable way as
351 // header_size + ((sizeof(jint) * max_jint) / HeapWordSize)
352 // we'll overflow on the multiply, so we do the divide first.
353 // We actually lose a little by dividing first,
354 // but that just makes the TLAB somewhat smaller than the biggest array,
355 // which is fine, since we'll be able to fill that.
356 size_t max_int_size = typeArrayOopDesc::header_size(T_INT) +
357 sizeof(jint) *
358 ((juint) max_jint / (size_t) HeapWordSize);
359 return align_down(max_int_size, MinObjAlignment);
360 }
361
// Size in words of the int[] filler object's header, padded so that the
// payload which follows it is long-aligned.
size_t CollectedHeap::filler_array_hdr_size() {
  return align_object_offset(arrayOopDesc::header_size(T_INT)); // align to Long
}
365
// Smallest region that can be formatted as a filler int[]: just the
// (object-size-aligned) array header with an empty payload.
size_t CollectedHeap::filler_array_min_size() {
  return align_object_size(filler_array_hdr_size()); // align to MinObjAlignment
}
369
#ifdef ASSERT
// Debug-only validation of fill_with_object(s) arguments: the region must be
// at least the minimum filler size, object-aligned, and lie entirely inside
// the reserved heap.
void CollectedHeap::fill_args_check(HeapWord* start, size_t words)
{
  assert(words >= min_fill_size(), "too small to fill");
  assert(is_object_aligned(words), "unaligned size");
  assert(Universe::heap()->is_in_reserved(start), "not in heap");
  assert(Universe::heap()->is_in_reserved(start + words - 1), "not in heap");
}

// Debug-only: overwrite a filler array's payload (everything past the header)
// with the recognizable pattern 0XDEAFBABE so stale reads of filler memory
// stand out. Gated on both the ZapFillerObjects flag and the caller's 'zap'.
void CollectedHeap::zap_filler_array(HeapWord* start, size_t words, bool zap)
{
  if (ZapFillerObjects && zap) {
    Copy::fill_to_words(start + filler_array_hdr_size(),
                        words - filler_array_hdr_size(), 0XDEAFBABE);
  }
}
#endif // ASSERT
387
388 void
389 CollectedHeap::fill_with_array(HeapWord* start, size_t words, bool zap)
390 {
391 assert(words >= filler_array_min_size(), "too small for an array");
392 assert(words <= filler_array_max_size(), "too big for a single object");
393
394 const size_t payload_size = words - filler_array_hdr_size();
395 const size_t len = payload_size * HeapWordSize / sizeof(jint);
396 assert((int)len >= 0, "size too large " SIZE_FORMAT " becomes %d", words, (int)len);
397
398 ObjArrayAllocator allocator(Universe::intArrayKlassObj(), words, (int)len, /* do_zero */ false);
399 allocator.initialize(start);
400 DEBUG_ONLY(zap_filler_array(start, words, zap);)
401 }
402
403 void
404 CollectedHeap::fill_with_object_impl(HeapWord* start, size_t words, bool zap)
405 {
406 assert(words <= filler_array_max_size(), "too big for a single object");
407
408 if (words >= filler_array_min_size()) {
409 fill_with_array(start, words, zap);
410 } else if (words > 0) {
411 assert(words == min_fill_size(), "unaligned size");
412 ObjAllocator allocator(SystemDictionary::Object_klass(), words);
413 allocator.initialize(start);
414 }
415 }
416
// Fill [start, start + words) with exactly one filler object, after
// debug-only argument validation.
void CollectedHeap::fill_with_object(HeapWord* start, size_t words, bool zap)
{
  DEBUG_ONLY(fill_args_check(start, words);)
  HandleMark hm;  // Free handles before leaving.
  fill_with_object_impl(start, words, zap);
}
423
// Fill [start, start + words) with one or more filler objects, after
// debug-only argument validation.
void CollectedHeap::fill_with_objects(HeapWord* start, size_t words, bool zap)
{
  DEBUG_ONLY(fill_args_check(start, words);)
  HandleMark hm; // Free handles before leaving.

  // Multiple objects may be required depending on the filler array maximum size. Fill
  // the range up to that with objects that are filler_array_max_size sized. The
  // remainder is filled with a single object.
  const size_t min = min_fill_size();
  const size_t max = filler_array_max_size();
  while (words > max) {
    // Take a full max-sized chunk unless that would leave a remainder smaller
    // than the minimum fill size; in that case take (max - min) so what is
    // left is still fillable by the final single object below.
    const size_t cur = (words - max) >= min ? max : max - min;
    fill_with_array(start, cur, zap);
    start += cur;
    words -= cur;
  }

  // At this point 0 <= words <= max, and if non-zero it is >= min.
  fill_with_object_impl(start, words, zap);
}
443
// Default dummy-object filler for [start, end): delegates to the generic
// fill_with_object (start, end) overload. NOTE(review): presumably virtual so
// collectors with special dummy requirements can override — confirm in
// collectedHeap.hpp.
void CollectedHeap::fill_with_dummy_object(HeapWord* start, HeapWord* end, bool zap) {
  CollectedHeap::fill_with_object(start, end, zap);
}
447
// Minimum size in words of a dummy (filler) object: a bare object header.
size_t CollectedHeap::min_dummy_object_size() const {
  return oopDesc::header_size();
}
451
452 size_t CollectedHeap::tlab_alloc_reserve() const {
453 size_t min_size = min_dummy_object_size();
454 return min_size > (size_t)MinObjAlignment ? align_object_size(min_size) : 0;
455 }
456
// Base-class stub: heaps that support TLABs must override this. Reaching it
// indicates a configuration error, hence the unconditional guarantee failure.
HeapWord* CollectedHeap::allocate_new_tlab(size_t min_size,
                                           size_t requested_size,
                                           size_t* actual_size) {
  guarantee(false, "thread-local allocation buffers not supported");
  return NULL;
}
463
// Allocate and initialize a plain Java instance of 'klass' ('size' in words).
// TRAPS: allocation failure can raise a pending exception in THREAD.
oop CollectedHeap::obj_allocate(Klass* klass, int size, TRAPS) {
  ObjAllocator allocator(klass, size, THREAD);
  return allocator.allocate();
}
468
// Allocate and initialize a Java array of 'klass' with 'length' elements
// ('size' in words); 'do_zero' controls element zeroing.
// TRAPS: allocation failure can raise a pending exception in THREAD.
oop CollectedHeap::array_allocate(Klass* klass, int size, int length, bool do_zero, TRAPS) {
  ObjArrayAllocator allocator(klass, size, length, do_zero, THREAD);
  return allocator.allocate();
}
473
474 oop CollectedHeap::class_allocate(Klass* klass, int size, TRAPS) {
|
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "classfile/systemDictionary.hpp"
27 #include "gc/shared/allocTracer.hpp"
28 #include "gc/shared/barrierSet.hpp"
29 #include "gc/shared/collectedHeap.hpp"
30 #include "gc/shared/collectedHeap.inline.hpp"
31 #include "gc/shared/fill.hpp"
32 #include "gc/shared/gcLocker.inline.hpp"
33 #include "gc/shared/gcHeapSummary.hpp"
34 #include "gc/shared/gcTrace.hpp"
35 #include "gc/shared/gcTraceTime.inline.hpp"
36 #include "gc/shared/gcWhen.hpp"
37 #include "gc/shared/memAllocator.hpp"
38 #include "gc/shared/vmGCOperations.hpp"
39 #include "logging/log.hpp"
40 #include "memory/metaspace.hpp"
41 #include "memory/resourceArea.hpp"
42 #include "oops/instanceMirrorKlass.hpp"
43 #include "oops/oop.inline.hpp"
44 #include "runtime/handles.inline.hpp"
45 #include "runtime/init.hpp"
46 #include "runtime/thread.inline.hpp"
47 #include "runtime/threadSMR.hpp"
48 #include "runtime/vmThread.hpp"
49 #include "services/heapDumper.hpp"
50 #include "utilities/align.hpp"
51 #include "utilities/copy.hpp"
52
53 class ClassLoaderData;
54
#ifdef ASSERT
// Debug-only counter for simulated out-of-memory injection; presumably read by
// fire_out_of_memory() — confirm against collectedHeap.hpp.
int CollectedHeap::_fire_out_of_memory_count = 0;
#endif
// Specialization for the GC heap event log: prints the before/after tag line,
// then the raw heap printout buffered in the GCMessage.
template <>
void EventLogBase<GCMessage>::print(outputStream* st, GCMessage& m) {
  st->print_cr("GC heap %s", m.is_before ? "before" : "after");
  st->print_raw(m);
}
64
65 void GCHeapLog::log_heap(CollectedHeap* heap, bool before) {
66 if (!should_log()) {
67 return;
68 }
69
70 double timestamp = fetch_timestamp();
71 MutexLockerEx ml(&_mutex, Mutex::_no_safepoint_check_flag);
72 int index = compute_log_index();
73 _records[index].thread = NULL; // Its the GC thread so it's not that interesting.
74 _records[index].timestamp = timestamp;
75 _records[index].data.is_before = before;
76 stringStream st(_records[index].data.buffer(), _records[index].data.size());
77
78 st.print_cr("{Heap %s GC invocations=%u (full %u):",
// Base-class default: this heap offers no requestable concurrent GC phases,
// so every request is refused. Collectors with concurrent phases presumably
// override this — confirm in collectedHeap.hpp.
bool CollectedHeap::request_concurrent_phase(const char* phase) {
  return false;
}
175
176 bool CollectedHeap::is_oop(oop object) const {
177 if (!check_obj_alignment(object)) {
178 return false;
179 }
180
181 if (!is_in_reserved(object)) {
182 return false;
183 }
184
185 if (is_in_reserved(object->klass_or_null())) {
186 return false;
187 }
188
189 return true;
190 }
191
192 CollectedHeap::CollectedHeap() :
193 _is_gc_active(false),
194 _total_collections(0),
195 _total_full_collections(0),
196 _gc_cause(GCCause::_no_gc),
197 _gc_lastcause(GCCause::_no_gc)
198 {
199 Fill::initialize();
200
201 NOT_PRODUCT(_promotion_failure_alot_count = 0;)
202 NOT_PRODUCT(_promotion_failure_alot_gc_number = 0;)
203
204 if (UsePerfData) {
205 EXCEPTION_MARK;
206
207 // create the gc cause jvmstat counters
208 _perf_gc_cause = PerfDataManager::create_string_variable(SUN_GC, "cause",
209 80, GCCause::to_string(_gc_cause), CHECK);
210
211 _perf_gc_lastcause =
212 PerfDataManager::create_string_variable(SUN_GC, "lastCause",
213 80, GCCause::to_string(_gc_lastcause), CHECK);
214 }
215
216 // Create the ring log
217 if (LogEvents) {
218 _gc_heap_log = new GCHeapLog();
219 } else {
333 assert((*(intptr_t*) (addr + slot)) == ((intptr_t) badHeapWordVal),
334 "Found non badHeapWordValue in pre-allocation check");
335 }
336 }
337 }
338 #endif // PRODUCT
339
340 size_t CollectedHeap::max_tlab_size() const {
341 // TLABs can't be bigger than we can fill with a int[Integer.MAX_VALUE].
342 // This restriction could be removed by enabling filling with multiple arrays.
343 // If we compute that the reasonable way as
344 // header_size + ((sizeof(jint) * max_jint) / HeapWordSize)
345 // we'll overflow on the multiply, so we do the divide first.
346 // We actually lose a little by dividing first,
347 // but that just makes the TLAB somewhat smaller than the biggest array,
348 // which is fine, since we'll be able to fill that.
349 size_t max_int_size = typeArrayOopDesc::header_size(T_INT) +
350 sizeof(jint) *
351 ((juint) max_jint / (size_t) HeapWordSize);
352 return align_down(max_int_size, MinObjAlignment);
353 }
354
// Base-class stub: heaps that support TLABs must override this. Reaching it
// indicates a configuration error, hence the unconditional guarantee failure.
HeapWord* CollectedHeap::allocate_new_tlab(size_t min_size,
                                           size_t requested_size,
                                           size_t* actual_size) {
  guarantee(false, "thread-local allocation buffers not supported");
  return NULL;
}
361
// Allocate and initialize a plain Java instance of 'klass' ('size' in words).
// TRAPS: allocation failure can raise a pending exception in THREAD.
oop CollectedHeap::obj_allocate(Klass* klass, int size, TRAPS) {
  ObjAllocator allocator(klass, size, THREAD);
  return allocator.allocate();
}
366
// Allocate and initialize a Java array of 'klass' with 'length' elements
// ('size' in words); 'do_zero' controls element zeroing.
// TRAPS: allocation failure can raise a pending exception in THREAD.
oop CollectedHeap::array_allocate(Klass* klass, int size, int length, bool do_zero, TRAPS) {
  ObjArrayAllocator allocator(klass, size, length, do_zero, THREAD);
  return allocator.allocate();
}
371
372 oop CollectedHeap::class_allocate(Klass* klass, int size, TRAPS) {
|