68 _records[index].thread = NULL; // Its the GC thread so it's not that interesting.
69 _records[index].timestamp = timestamp;
70 _records[index].data.is_before = before;
71 stringStream st(_records[index].data.buffer(), _records[index].data.size());
72 if (before) {
73 Universe::print_heap_before_gc(&st, true);
74 } else {
75 Universe::print_heap_after_gc(&st, true);
76 }
77 }
78
79 // Memory state functions.
80
81
CollectedHeap::CollectedHeap() : _n_par_threads(0)

{
  // Precompute the largest filler array size: an int[] of maximal length.
  // max_len is the element count; elements_per_word is jint elements per HeapWord.
  const size_t max_len = size_t(arrayOopDesc::max_array_length(T_INT));
  const size_t elements_per_word = HeapWordSize / sizeof(jint);
  // NOTE(review): this MULTIPLIES max_len by elements_per_word.  Converting an
  // element count into a word count should DIVIDE (max_len / elements_per_word),
  // which is what the other copy of this constructor in this file does — confirm
  // which is intended; '*' overstates the limit and can overflow the size math.
  _filler_array_max_size = align_object_size(filler_array_hdr_size() +
                                             max_len * elements_per_word);

  // Subclasses install a real barrier set later; GC bookkeeping starts zeroed.
  _barrier_set = NULL;
  _is_gc_active = false;
  _total_collections = _total_full_collections = 0;
  _gc_cause = _gc_lastcause = GCCause::_no_gc;
  NOT_PRODUCT(_promotion_failure_alot_count = 0;)
  NOT_PRODUCT(_promotion_failure_alot_gc_number = 0;)

  if (UsePerfData) {
    EXCEPTION_MARK;

    // create the gc cause jvmstat counters
    _perf_gc_cause = PerfDataManager::create_string_variable(SUN_GC, "cause",
                             80, GCCause::to_string(_gc_cause), CHECK);

    _perf_gc_lastcause =
      PerfDataManager::create_string_variable(SUN_GC, "lastCause",
                             80, GCCause::to_string(_gc_lastcause), CHECK);
  }
  _defer_initial_card_mark = false; // strengthened by subclass in pre_initialize() below.
286 // Defer the card mark
287 thread->set_deferred_card_mark(mr);
288 } else {
289 // Do the card mark
290 BarrierSet* bs = barrier_set();
291 assert(bs->has_write_region_opt(), "No write_region() on BarrierSet");
292 bs->write_region(mr);
293 }
294 }
295 return new_obj;
296 }
297
298 size_t CollectedHeap::filler_array_hdr_size() {
299 return size_t(align_object_offset(arrayOopDesc::header_size(T_INT))); // align to Long
300 }
301
302 size_t CollectedHeap::filler_array_min_size() {
303 return align_object_size(filler_array_hdr_size()); // align to MinObjAlignment
304 }
305
// Upper bound, in words, on a single filler array; the value is computed
// once in the CollectedHeap constructor and cached in _filler_array_max_size.
size_t CollectedHeap::filler_array_max_size() {
  return _filler_array_max_size;
}
309
310 #ifdef ASSERT
311 void CollectedHeap::fill_args_check(HeapWord* start, size_t words)
312 {
313 assert(words >= min_fill_size(), "too small to fill");
314 assert(words % MinObjAlignment == 0, "unaligned size");
315 assert(Universe::heap()->is_in_reserved(start), "not in heap");
316 assert(Universe::heap()->is_in_reserved(start + words - 1), "not in heap");
317 }
318
319 void CollectedHeap::zap_filler_array(HeapWord* start, size_t words, bool zap)
320 {
321 if (ZapFillerObjects && zap) {
322 Copy::fill_to_words(start + filler_array_hdr_size(),
323 words - filler_array_hdr_size(), 0XDEAFBABE);
324 }
325 }
326 #endif // ASSERT
327
328 void
329 CollectedHeap::fill_with_array(HeapWord* start, size_t words, bool zap)
330 {
331 assert(words >= filler_array_min_size(), "too small for an array");
332 assert(words <= filler_array_max_size(), "too big for a single object");
333
334 const size_t payload_size = words - filler_array_hdr_size();
335 const size_t len = payload_size * HeapWordSize / sizeof(jint);
336
337 // Set the length first for concurrent GC.
338 ((arrayOop)start)->set_length((int)len);
339 post_allocation_setup_common(Universe::intArrayKlassObj(), start, words);
340 DEBUG_ONLY(zap_filler_array(start, words, zap);)
341 }
342
343 void
344 CollectedHeap::fill_with_object_impl(HeapWord* start, size_t words, bool zap)
345 {
346 assert(words <= filler_array_max_size(), "too big for a single object");
347
348 if (words >= filler_array_min_size()) {
349 fill_with_array(start, words, zap);
350 } else if (words > 0) {
351 assert(words == min_fill_size(), "unaligned size");
352 post_allocation_setup_common(SystemDictionary::Object_klass(), start,
353 words);
354 }
355 }
|
68 _records[index].thread = NULL; // Its the GC thread so it's not that interesting.
69 _records[index].timestamp = timestamp;
70 _records[index].data.is_before = before;
71 stringStream st(_records[index].data.buffer(), _records[index].data.size());
72 if (before) {
73 Universe::print_heap_before_gc(&st, true);
74 } else {
75 Universe::print_heap_after_gc(&st, true);
76 }
77 }
78
79 // Memory state functions.
80
81
CollectedHeap::CollectedHeap() : _n_par_threads(0)

{
  // Precompute the largest filler array size: an int[] of maximal length.
  // max_len jint elements occupy max_len / elements_per_word HeapWords.
  const size_t max_len = size_t(arrayOopDesc::max_array_length(T_INT));
  const size_t elements_per_word = HeapWordSize / sizeof(jint);
  _filler_array_max_size = align_object_size(filler_array_hdr_size() +
                                             max_len / elements_per_word);

  // Subclasses install a real barrier set later; GC bookkeeping starts zeroed.
  _barrier_set = NULL;
  _is_gc_active = false;
  _total_collections = _total_full_collections = 0;
  _gc_cause = _gc_lastcause = GCCause::_no_gc;
  NOT_PRODUCT(_promotion_failure_alot_count = 0;)
  NOT_PRODUCT(_promotion_failure_alot_gc_number = 0;)

  if (UsePerfData) {
    EXCEPTION_MARK;

    // create the gc cause jvmstat counters
    _perf_gc_cause = PerfDataManager::create_string_variable(SUN_GC, "cause",
                             80, GCCause::to_string(_gc_cause), CHECK);

    _perf_gc_lastcause =
      PerfDataManager::create_string_variable(SUN_GC, "lastCause",
                             80, GCCause::to_string(_gc_lastcause), CHECK);
  }
  _defer_initial_card_mark = false; // strengthened by subclass in pre_initialize() below.
286 // Defer the card mark
287 thread->set_deferred_card_mark(mr);
288 } else {
289 // Do the card mark
290 BarrierSet* bs = barrier_set();
291 assert(bs->has_write_region_opt(), "No write_region() on BarrierSet");
292 bs->write_region(mr);
293 }
294 }
295 return new_obj;
296 }
297
298 size_t CollectedHeap::filler_array_hdr_size() {
299 return size_t(align_object_offset(arrayOopDesc::header_size(T_INT))); // align to Long
300 }
301
302 size_t CollectedHeap::filler_array_min_size() {
303 return align_object_size(filler_array_hdr_size()); // align to MinObjAlignment
304 }
305
306 #ifdef ASSERT
307 void CollectedHeap::fill_args_check(HeapWord* start, size_t words)
308 {
309 assert(words >= min_fill_size(), "too small to fill");
310 assert(words % MinObjAlignment == 0, "unaligned size");
311 assert(Universe::heap()->is_in_reserved(start), "not in heap");
312 assert(Universe::heap()->is_in_reserved(start + words - 1), "not in heap");
313 }
314
315 void CollectedHeap::zap_filler_array(HeapWord* start, size_t words, bool zap)
316 {
317 if (ZapFillerObjects && zap) {
318 Copy::fill_to_words(start + filler_array_hdr_size(),
319 words - filler_array_hdr_size(), 0XDEAFBABE);
320 }
321 }
322 #endif // ASSERT
323
324 void
325 CollectedHeap::fill_with_array(HeapWord* start, size_t words, bool zap)
326 {
327 assert(words >= filler_array_min_size(), "too small for an array");
328 assert(words <= filler_array_max_size(), "too big for a single object");
329
330 const size_t payload_size = words - filler_array_hdr_size();
331 const size_t len = payload_size * HeapWordSize / sizeof(jint);
332 assert((int)len >= 0, err_msg("size too large " SIZE_FORMAT " becomes %d", words, (int)len));
333
334 // Set the length first for concurrent GC.
335 ((arrayOop)start)->set_length((int)len);
336 post_allocation_setup_common(Universe::intArrayKlassObj(), start, words);
337 DEBUG_ONLY(zap_filler_array(start, words, zap);)
338 }
339
340 void
341 CollectedHeap::fill_with_object_impl(HeapWord* start, size_t words, bool zap)
342 {
343 assert(words <= filler_array_max_size(), "too big for a single object");
344
345 if (words >= filler_array_min_size()) {
346 fill_with_array(start, words, zap);
347 } else if (words > 0) {
348 assert(words == min_fill_size(), "unaligned size");
349 post_allocation_setup_common(SystemDictionary::Object_klass(), start,
350 words);
351 }
352 }
|