13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "gc/parallel/mutableSpace.hpp"
27 #include "gc/parallel/parallelScavengeHeap.hpp"
28 #include "gc/parallel/psOldGen.hpp"
29 #include "gc/parallel/psPromotionManager.inline.hpp"
30 #include "gc/parallel/psScavenge.inline.hpp"
31 #include "gc/shared/gcTrace.hpp"
32 #include "gc/shared/taskqueue.inline.hpp"
33 #include "memory/allocation.inline.hpp"
34 #include "memory/memRegion.hpp"
35 #include "memory/padded.inline.hpp"
36 #include "oops/instanceKlass.inline.hpp"
37 #include "oops/instanceMirrorKlass.inline.hpp"
38 #include "oops/objArrayKlass.inline.hpp"
39 #include "oops/oop.inline.hpp"
40
// Static state shared by all promotion managers. _manager_array holds
// ParallelGCThreads + 1 padded entries (the extra slot at index
// ParallelGCThreads belongs to the VM thread; see
// vm_thread_promotion_manager()). All four are assigned in initialize().
PaddedEnd<PSPromotionManager>* PSPromotionManager::_manager_array = NULL;
OopStarTaskQueueSet* PSPromotionManager::_stack_array_depth = NULL;
PSOldGen* PSPromotionManager::_old_gen = NULL;
MutableSpace* PSPromotionManager::_young_space = NULL;
45
46 void PSPromotionManager::initialize() {
47 ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
48
49 _old_gen = heap->old_gen();
50 _young_space = heap->young_gen()->to_space();
51
52 // To prevent false sharing, we pad the PSPromotionManagers
82 }
83
84 PSPromotionManager* PSPromotionManager::vm_thread_promotion_manager() {
85 assert(_manager_array != NULL, "Sanity");
86 return &_manager_array[ParallelGCThreads];
87 }
88
89 void PSPromotionManager::pre_scavenge() {
90 ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
91
92 _young_space = heap->young_gen()->to_space();
93
94 for(uint i=0; i<ParallelGCThreads+1; i++) {
95 manager_array(i)->reset();
96 }
97 }
98
99 bool PSPromotionManager::post_scavenge(YoungGCTracer& gc_tracer) {
100 bool promotion_failure_occurred = false;
101
102 TASKQUEUE_STATS_ONLY(if (PrintTaskqueue) print_taskqueue_stats());
103 for (uint i = 0; i < ParallelGCThreads + 1; i++) {
104 PSPromotionManager* manager = manager_array(i);
105 assert(manager->claimed_stack_depth()->is_empty(), "should be empty");
106 if (manager->_promotion_failed_info.has_failed()) {
107 gc_tracer.report_promotion_failed(manager->_promotion_failed_info);
108 promotion_failure_occurred = true;
109 }
110 manager->flush_labs();
111 }
112 return promotion_failure_occurred;
113 }
114
115 #if TASKQUEUE_STATS
// Print one row of this manager's local task queue statistics: the thread
// index followed by the masked push/steal counts and the array chunking
// counts, each right-aligned in a 10-character column.
void
PSPromotionManager::print_local_stats(outputStream* const out, uint i) const {
#define FMT " " SIZE_FORMAT_W(10)
  out->print_cr("%3u" FMT FMT FMT FMT, i, _masked_pushes, _masked_steals,
                _arrays_chunked, _array_chunks_processed);
#undef FMT
}
123
// Column headers for the per-manager stats table emitted by
// print_taskqueue_stats(); rows are produced by print_local_stats() with a
// "%3u" thread field followed by four SIZE_FORMAT_W(10) columns, so every
// line here is exactly 3 + 4 * 11 = 47 characters wide. The multi-space
// alignment (lost in the mangled original) is restored to match the
// 10-wide dash ruler on the last line.
static const char* const pm_stats_hdr[] = {
  "    --------masked-------     arrays      array",
  "thr       push      steal    chunked     chunks",
  "--- ---------- ---------- ---------- ----------"
};
129
// Print task queue statistics for every manager (GC workers plus the VM
// thread's slot): a GC-count header, one row per claimed depth-first stack,
// a totals row, then the per-manager local stats table (pm_stats_hdr /
// print_local_stats).
void
PSPromotionManager::print_taskqueue_stats(outputStream* const out) {
  out->print_cr("== GC Tasks Stats, GC %3d",
                ParallelScavengeHeap::heap()->total_collections());

  TaskQueueStats totals;
  out->print("thr "); TaskQueueStats::print_header(1, out); out->cr();
  out->print("--- "); TaskQueueStats::print_header(2, out); out->cr();
  for (uint i = 0; i < ParallelGCThreads + 1; ++i) {
    // One row per manager's depth-first stack; accumulate into the total.
    TaskQueueStats& next = manager_array(i)->_claimed_stack_depth.stats;
    out->print("%3d ", i); next.print(out); out->cr();
    totals += next;
  }
  out->print("tot "); totals.print(out); out->cr();

  // Second table: per-manager local counters (masked pushes/steals,
  // array chunking).
  const uint hlines = sizeof(pm_stats_hdr) / sizeof(pm_stats_hdr[0]);
  for (uint i = 0; i < hlines; ++i) out->print_cr("%s", pm_stats_hdr[i]);
  for (uint i = 0; i < ParallelGCThreads + 1; ++i) {
    manager_array(i)->print_local_stats(out, i);
  }
}
151
// Push the fields of a java.lang.ref.Reference instance onto pm's depth-first
// stack. The referent is offered to the ReferenceProcessor first; only if
// discovery fails (or the referent needs no scavenging) are the referent,
// discovered and next fields treated as ordinary oops. Specialized on the
// in-heap oop representation (narrowOop vs. oop).
template <class T>
static void oop_ps_push_contents_specialized(oop obj, InstanceRefKlass *klass, PSPromotionManager* pm) {
  T* referent_addr = (T*)java_lang_ref_Reference::referent_addr(obj);
  if (PSScavenge::should_scavenge(referent_addr)) {
    ReferenceProcessor* rp = PSScavenge::reference_processor();
    if (rp->discover_reference(obj, klass->reference_type())) {
      // reference already enqueued, referent and next will be traversed later
      klass->InstanceKlass::oop_ps_push_contents(obj, pm);
      return;
    } else {
      // treat referent as normal oop
      pm->claim_or_forward_depth(referent_addr);
    }
  }
  // Treat discovered as normal oop, if ref is not "active",
  // i.e. if next is non-NULL.
  T* next_addr = (T*)java_lang_ref_Reference::next_addr(obj);
  T next_oop = oopDesc::load_heap_oop(next_addr);
  if (!oopDesc::is_null(next_oop)) { // i.e. ref is not "active"
    T* discovered_addr = (T*)java_lang_ref_Reference::discovered_addr(obj);
    debug_only(
      if(TraceReferenceGC && PrintGCDetails) {
        gclog_or_tty->print_cr(" Process discovered as normal "
                               PTR_FORMAT, p2i(discovered_addr));
      }
    )
    if (PSScavenge::should_scavenge(discovered_addr)) {
      pm->claim_or_forward_depth(discovered_addr);
    }
  }
  // Treat next as normal oop; next is a link in the reference queue.
  if (PSScavenge::should_scavenge(next_addr)) {
    pm->claim_or_forward_depth(next_addr);
  }
  // Finally push the instance's ordinary (non-Reference) fields.
  klass->InstanceKlass::oop_ps_push_contents(obj, pm);
}
387
388 void InstanceRefKlass::oop_ps_push_contents(oop obj, PSPromotionManager* pm) {
389 if (UseCompressedOops) {
390 oop_ps_push_contents_specialized<narrowOop>(obj, this, pm);
391 } else {
392 oop_ps_push_contents_specialized<oop>(obj, this, pm);
393 }
394 }
395
396 void ObjArrayKlass::oop_ps_push_contents(oop obj, PSPromotionManager* pm) {
413 // pointer has been installed), then this thread owns
414 // it.
415 if (obj->cas_forward_to(obj, obj_mark)) {
416 // We won any races, we "own" this object.
417 assert(obj == obj->forwardee(), "Sanity");
418
419 _promotion_failed_info.register_copy_failure(obj->size());
420
421 push_contents(obj);
422
423 // Save the mark if needed
424 PSScavenge::oop_promotion_failed(obj, obj_mark);
425 } else {
426 // We lost, someone else "owns" this object
427 guarantee(obj->is_forwarded(), "Object must be forwarded if the cas failed.");
428
429 // No unallocation to worry about.
430 obj = obj->forwardee();
431 }
432
433 #ifndef PRODUCT
434 if (TraceScavenge) {
435 gclog_or_tty->print_cr("{%s %s " PTR_FORMAT " (%d)}",
436 "promotion-failure",
437 obj->klass()->internal_name(),
438 p2i(obj), obj->size());
439
440 }
441 #endif
442
443 return obj;
444 }
|
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "gc/parallel/mutableSpace.hpp"
27 #include "gc/parallel/parallelScavengeHeap.hpp"
28 #include "gc/parallel/psOldGen.hpp"
29 #include "gc/parallel/psPromotionManager.inline.hpp"
30 #include "gc/parallel/psScavenge.inline.hpp"
31 #include "gc/shared/gcTrace.hpp"
32 #include "gc/shared/taskqueue.inline.hpp"
33 #include "logging/log.hpp"
34 #include "memory/allocation.inline.hpp"
35 #include "memory/memRegion.hpp"
36 #include "memory/padded.inline.hpp"
37 #include "oops/instanceKlass.inline.hpp"
38 #include "oops/instanceMirrorKlass.inline.hpp"
39 #include "oops/objArrayKlass.inline.hpp"
40 #include "oops/oop.inline.hpp"
41
// Static state shared by all promotion managers. _manager_array holds
// ParallelGCThreads + 1 padded entries (the extra slot at index
// ParallelGCThreads belongs to the VM thread; see
// vm_thread_promotion_manager()). All four are assigned in initialize().
PaddedEnd<PSPromotionManager>* PSPromotionManager::_manager_array = NULL;
OopStarTaskQueueSet* PSPromotionManager::_stack_array_depth = NULL;
PSOldGen* PSPromotionManager::_old_gen = NULL;
MutableSpace* PSPromotionManager::_young_space = NULL;
46
47 void PSPromotionManager::initialize() {
48 ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
49
50 _old_gen = heap->old_gen();
51 _young_space = heap->young_gen()->to_space();
52
53 // To prevent false sharing, we pad the PSPromotionManagers
83 }
84
85 PSPromotionManager* PSPromotionManager::vm_thread_promotion_manager() {
86 assert(_manager_array != NULL, "Sanity");
87 return &_manager_array[ParallelGCThreads];
88 }
89
90 void PSPromotionManager::pre_scavenge() {
91 ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
92
93 _young_space = heap->young_gen()->to_space();
94
95 for(uint i=0; i<ParallelGCThreads+1; i++) {
96 manager_array(i)->reset();
97 }
98 }
99
100 bool PSPromotionManager::post_scavenge(YoungGCTracer& gc_tracer) {
101 bool promotion_failure_occurred = false;
102
103 TASKQUEUE_STATS_ONLY(print_taskqueue_stats());
104 for (uint i = 0; i < ParallelGCThreads + 1; i++) {
105 PSPromotionManager* manager = manager_array(i);
106 assert(manager->claimed_stack_depth()->is_empty(), "should be empty");
107 if (manager->_promotion_failed_info.has_failed()) {
108 gc_tracer.report_promotion_failed(manager->_promotion_failed_info);
109 promotion_failure_occurred = true;
110 }
111 manager->flush_labs();
112 }
113 return promotion_failure_occurred;
114 }
115
116 #if TASKQUEUE_STATS
// Print one row of this manager's local task queue statistics: the thread
// index followed by the masked push/steal counts and the array chunking
// counts, each right-aligned in a 10-character column.
void
PSPromotionManager::print_local_stats(outputStream* const out, uint i) const {
#define FMT " " SIZE_FORMAT_W(10)
  out->print_cr("%3u" FMT FMT FMT FMT, i, _masked_pushes, _masked_steals,
                _arrays_chunked, _array_chunks_processed);
#undef FMT
}
124
// Column headers for the per-manager stats table emitted by
// print_taskqueue_stats(); rows are produced by print_local_stats() with a
// "%3u" thread field followed by four SIZE_FORMAT_W(10) columns, so every
// line here is exactly 3 + 4 * 11 = 47 characters wide. The multi-space
// alignment (lost in the mangled original) is restored to match the
// 10-wide dash ruler on the last line.
static const char* const pm_stats_hdr[] = {
  "    --------masked-------     arrays      array",
  "thr       push      steal    chunked     chunks",
  "--- ---------- ---------- ---------- ----------"
};
130
// Print task queue statistics for every manager (GC workers plus the VM
// thread's slot). A no-op unless develop-level logging is enabled for the
// (gc, task, stats) tags; output goes to that log's stream.
void
PSPromotionManager::print_taskqueue_stats() {
  LogHandle(gc, task, stats) log;
  if (!log.is_develop()) {
    // Stats logging not enabled at develop level — nothing to do.
    return;
  }
  ResourceMark rm;
  outputStream* out = log.develop_stream();
  out->print_cr("== GC Tasks Stats, GC %3d",
                ParallelScavengeHeap::heap()->total_collections());

  TaskQueueStats totals;
  out->print("thr "); TaskQueueStats::print_header(1, out); out->cr();
  out->print("--- "); TaskQueueStats::print_header(2, out); out->cr();
  for (uint i = 0; i < ParallelGCThreads + 1; ++i) {
    // One row per manager's depth-first stack; accumulate into the total.
    TaskQueueStats& next = manager_array(i)->_claimed_stack_depth.stats;
    out->print("%3d ", i); next.print(out); out->cr();
    totals += next;
  }
  out->print("tot "); totals.print(out); out->cr();

  // Second table: per-manager local counters (masked pushes/steals,
  // array chunking).
  const uint hlines = sizeof(pm_stats_hdr) / sizeof(pm_stats_hdr[0]);
  for (uint i = 0; i < hlines; ++i) out->print_cr("%s", pm_stats_hdr[i]);
  for (uint i = 0; i < ParallelGCThreads + 1; ++i) {
    manager_array(i)->print_local_stats(out, i);
  }
}
158
// Push the fields of a java.lang.ref.Reference instance onto pm's depth-first
// stack. The referent is offered to the ReferenceProcessor first; only if
// discovery fails (or the referent needs no scavenging) are the referent,
// discovered and next fields treated as ordinary oops. Specialized on the
// in-heap oop representation (narrowOop vs. oop).
template <class T>
static void oop_ps_push_contents_specialized(oop obj, InstanceRefKlass *klass, PSPromotionManager* pm) {
  T* referent_addr = (T*)java_lang_ref_Reference::referent_addr(obj);
  if (PSScavenge::should_scavenge(referent_addr)) {
    ReferenceProcessor* rp = PSScavenge::reference_processor();
    if (rp->discover_reference(obj, klass->reference_type())) {
      // reference already enqueued, referent and next will be traversed later
      klass->InstanceKlass::oop_ps_push_contents(obj, pm);
      return;
    } else {
      // treat referent as normal oop
      pm->claim_or_forward_depth(referent_addr);
    }
  }
  // Treat discovered as normal oop, if ref is not "active",
  // i.e. if next is non-NULL.
  T* next_addr = (T*)java_lang_ref_Reference::next_addr(obj);
  T next_oop = oopDesc::load_heap_oop(next_addr);
  if (!oopDesc::is_null(next_oop)) { // i.e. ref is not "active"
    T* discovered_addr = (T*)java_lang_ref_Reference::discovered_addr(obj);
    log_develop(gc, ref)(" Process discovered as normal " PTR_FORMAT, p2i(discovered_addr));
    if (PSScavenge::should_scavenge(discovered_addr)) {
      pm->claim_or_forward_depth(discovered_addr);
    }
  }
  // Treat next as normal oop; next is a link in the reference queue.
  if (PSScavenge::should_scavenge(next_addr)) {
    pm->claim_or_forward_depth(next_addr);
  }
  // Finally push the instance's ordinary (non-Reference) fields.
  klass->InstanceKlass::oop_ps_push_contents(obj, pm);
}
389
390 void InstanceRefKlass::oop_ps_push_contents(oop obj, PSPromotionManager* pm) {
391 if (UseCompressedOops) {
392 oop_ps_push_contents_specialized<narrowOop>(obj, this, pm);
393 } else {
394 oop_ps_push_contents_specialized<oop>(obj, this, pm);
395 }
396 }
397
398 void ObjArrayKlass::oop_ps_push_contents(oop obj, PSPromotionManager* pm) {
415 // pointer has been installed), then this thread owns
416 // it.
417 if (obj->cas_forward_to(obj, obj_mark)) {
418 // We won any races, we "own" this object.
419 assert(obj == obj->forwardee(), "Sanity");
420
421 _promotion_failed_info.register_copy_failure(obj->size());
422
423 push_contents(obj);
424
425 // Save the mark if needed
426 PSScavenge::oop_promotion_failed(obj, obj_mark);
427 } else {
428 // We lost, someone else "owns" this object
429 guarantee(obj->is_forwarded(), "Object must be forwarded if the cas failed.");
430
431 // No unallocation to worry about.
432 obj = obj->forwardee();
433 }
434
435 log_develop(gc, scavenge)("{promotion-failure %s " PTR_FORMAT " (%d)}", obj->klass()->internal_name(), p2i(obj), obj->size());
436
437 return obj;
438 }
|