1 #ifdef USE_PRAGMA_IDENT_SRC
2 #pragma ident "@(#)psPromotionManager.cpp 1.30 07/09/25 16:47:41 JVM"
3 #endif
4 /*
5 * Copyright 2002-2006 Sun Microsystems, Inc. All Rights Reserved.
6 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
7 *
8 * This code is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License version 2 only, as
10 * published by the Free Software Foundation.
11 *
12 * This code is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 * version 2 for more details (a copy is included in the LICENSE file that
16 * accompanied this code).
17 *
18 * You should have received a copy of the GNU General Public License version
19 * 2 along with this work; if not, write to the Free Software Foundation,
20 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
21 *
22 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23 * CA 95054 USA or visit www.sun.com if you need additional information or
24 * have any questions.
25 *
168 PSPromotionManager* manager = manager_array(i);
169 manager->print_stats(i);
170 }
171 }
172
173 #endif // PS_PM_STATS
174
// Per-GC-thread promotion manager for the parallel scavenge collector.
// The constructor initializes either the depth-first or the breadth-first
// work queue (selected by UseDepthFirstScavengeOrder), allocates a
// permanent C-heap overflow stack for that mode, and computes the drain
// target used to decide when to stop draining and leave work for stealing.
// NOTE(review): this listing is missing original lines 209-225, so part of
// the initialization (presumably young-lab setup and the array-chunking
// thresholds) is not visible here — confirm against the full source.
175 PSPromotionManager::PSPromotionManager() {
176 ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
177 assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
// Scavenge order is fixed at construction time for this manager.
178 _depth_first = UseDepthFirstScavengeOrder;
179
180 // We set the old lab's start array.
181 _old_lab.set_start_array(old_gen()->start_array());
182
183 uint queue_size;
184 if (depth_first()) {
185 claimed_stack_depth()->initialize();
186 queue_size = claimed_stack_depth()->max_elems();
187 // We want the overflow stack to be permanent
188 _overflow_stack_depth = new (ResourceObj::C_HEAP) GrowableArray<oop*>(10, true);
// Only one of the two overflow stacks exists per mode; the other stays NULL.
189 _overflow_stack_breadth = NULL;
190 } else {
191 claimed_stack_breadth()->initialize();
192 queue_size = claimed_stack_breadth()->max_elems();
193 // We want the overflow stack to be permanent
194 _overflow_stack_breadth = new (ResourceObj::C_HEAP) GrowableArray<oop>(10, true);
195 _overflow_stack_depth = NULL;
196 }
197
// With a single GC thread (or an explicit target of 0) no one can steal
// from this queue, so always drain it completely.
198 _totally_drain = (ParallelGCThreads == 1) || (GCDrainStackTargetSize == 0);
199 if (_totally_drain) {
200 _target_stack_size = 0;
201 } else {
202 // don't let the target stack size to be more than 1/4 of the entries
203 _target_stack_size = (uint) MIN2((uint) GCDrainStackTargetSize,
204 (uint) (queue_size / 4));
205 }
206
207 _array_chunk_size = ParGCArrayScanChunk;
208 // let's choose 1.5x the chunk size
226 _young_gen_is_full = false;
227
// Start the old-generation lab as an empty (zero-length) region at the top
// of the old space; it is grown when the first promotion needs it.
228 lab_base = old_gen()->object_space()->top();
229 _old_lab.initialize(MemRegion(lab_base, (size_t)0));
230 _old_gen_is_full = false;
231
232 _prefetch_queue.clear();
233
234 #if PS_PM_STATS
// Per-manager statistics counters, compiled in only when PS_PM_STATS is set.
235 _total_pushes = 0;
236 _masked_pushes = 0;
237 _overflow_pushes = 0;
238 _max_overflow_length = 0;
239 _arrays_chunked = 0;
240 _array_chunks_processed = 0;
241 _total_steals = 0;
242 _masked_steals = 0;
243 #endif // PS_PM_STATS
244 }
245
// Drain this manager's depth-first scanning stacks.  The overflow stack
// is emptied first so other GC threads can steal from the claimed
// (work-stealing) stack while we work; the claimed stack is then popped
// either completely (totally_drain) or only down to _target_stack_size,
// deliberately leaving entries available for stealing.
// NOTE(review): the closing lines of this function (after original line
// 286) are not visible in this listing.
246 void PSPromotionManager::drain_stacks_depth(bool totally_drain) {
247 assert(depth_first(), "invariant");
248 assert(overflow_stack_depth() != NULL, "invariant");
// A manager constructed with _totally_drain set always drains fully.
249 totally_drain = totally_drain || _totally_drain;
250
251 #ifdef ASSERT
252 ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
253 assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
254 MutableSpace* to_space = heap->young_gen()->to_space();
255 MutableSpace* old_space = heap->old_gen()->object_space();
256 MutableSpace* perm_space = heap->perm_gen()->object_space();
257 #endif /* ASSERT */
258
259 do {
260 oop* p;
261
262 // Drain overflow stack first, so other threads can steal from
263 // claimed stack while we work.
264 while(!overflow_stack_depth()->is_empty()) {
265 p = overflow_stack_depth()->pop();
266 process_popped_location_depth(p);
267 }
268
269 if (totally_drain) {
270 while (claimed_stack_depth()->pop_local(p)) {
271 process_popped_location_depth(p);
272 }
273 } else {
// Stop early once the queue has shrunk to the steal target.
274 while (claimed_stack_depth()->size() > _target_stack_size &&
275 claimed_stack_depth()->pop_local(p)) {
276 process_popped_location_depth(p);
277 }
278 }
// Processing popped locations may push new work onto either stack, so
// loop until both are drained to their targets.
279 } while( (totally_drain && claimed_stack_depth()->size() > 0) ||
280 (overflow_stack_depth()->length() > 0) );
281
282 assert(!totally_drain || claimed_stack_empty(), "Sanity");
283 assert(totally_drain ||
284 claimed_stack_depth()->size() <= _target_stack_size,
285 "Sanity");
286 assert(overflow_stack_empty(), "Sanity");
351 if (!_young_lab.is_flushed())
352 _young_lab.flush();
353
354 assert(!_old_lab.is_flushed() || _old_gen_is_full, "Sanity");
355 if (!_old_lab.is_flushed())
356 _old_lab.flush();
357
358 // Let PSScavenge know if we overflowed
359 if (_young_gen_is_full) {
360 PSScavenge::set_survivor_overflow(true);
361 }
362 }
363
364 //
365 // This method is pretty bulky. It would be nice to split it up
366 // into smaller submethods, but we need to be careful not to hurt
367 // performance.
368 //
369
// Copy the live object o into a survivor space (or promote it), returning
// the new location.  Multiple GC threads may race to forward the same
// object; the winner installs the forwarding pointer and the losers must
// undo their speculative allocation.
// NOTE(review): this listing is missing original lines 392-481 (the actual
// allocation, copy and forwarding-CAS logic), so only the entry test and
// the lost-race cleanup are visible here.
370 oop PSPromotionManager::copy_to_survivor_space(oop o, bool depth_first) {
371 assert(PSScavenge::should_scavenge(o), "Sanity");
372
373 oop new_obj = NULL;
374
375 // NOTE! We must be very careful with any methods that access the mark
376 // in o. There may be multiple threads racing on it, and it may be forwarded
377 // at any time. Do not use oop methods for accessing the mark!
378 markOop test_mark = o->mark();
379
380 // The same test as "o->is_forwarded()"
381 if (!test_mark->is_marked()) {
382 bool new_obj_is_tenured = false;
383 size_t new_obj_size = o->size();
384
385 // Find the objects age, MT safe.
// Use the displaced mark (saved header) when one exists, e.g. if the
// object's header is involved in locking.
386 int age = (test_mark->has_displaced_mark_helper() /* o->has_displaced_mark() */) ?
387 test_mark->displaced_mark_helper()->age() : test_mark->age();
388
389 // Try allocating obj in to-space (unless too old)
390 if (age < PSScavenge::tenuring_threshold()) {
391 new_obj = (oop) _young_lab.allocate(new_obj_size);
482 // we'll chunk it
483 #if PS_PM_STATS
484 ++_arrays_chunked;
485 #endif // PS_PM_STATS
// Push a masked (tagged) pointer so the drain loop knows this entry is a
// chunked array rather than a plain location.
486 oop* const masked_o = mask_chunked_array_oop(o);
487 push_depth(masked_o);
488 #if PS_PM_STATS
489 ++_masked_pushes;
490 #endif // PS_PM_STATS
491 } else {
492 // we'll just push its contents
493 new_obj->push_contents(this);
494 }
495 } else {
496 push_breadth(new_obj);
497 }
498 } else {
499 // We lost, someone else "owns" this object
500 guarantee(o->is_forwarded(), "Object must be forwarded if the cas failed.");
501
502 // Unallocate the space used. NOTE! We may have directly allocated
503 // the object. If so, we cannot deallocate it, so we have to test!
504 if (new_obj_is_tenured) {
505 if (!_old_lab.unallocate_object(new_obj)) {
506 // The promotion lab failed to unallocate the object.
507 // We need to overwrite the object with a filler that
508 // contains no interior pointers.
509 MemRegion mr((HeapWord*)new_obj, new_obj_size);
510 // Clean this up and move to oopFactory (see bug 4718422)
511 SharedHeap::fill_region_with_object(mr);
512 }
513 } else {
514 if (!_young_lab.unallocate_object(new_obj)) {
515 // The promotion lab failed to unallocate the object.
516 // We need to overwrite the object with a filler that
517 // contains no interior pointers.
518 MemRegion mr((HeapWord*)new_obj, new_obj_size);
519 // Clean this up and move to oopFactory (see bug 4718422)
520 SharedHeap::fill_region_with_object(mr);
521 }
522 }
523
524 // don't update this before the unallocation!
525 new_obj = o->forwardee();
526 }
527 } else {
// Already forwarded by some thread (possibly us, earlier): just return
// the forwardee.
528 assert(o->is_forwarded(), "Sanity");
529 new_obj = o->forwardee();
530 }
531
532 #ifdef DEBUG
533 // This code must come after the CAS test, or it will print incorrect
534 // information.
535 if (TraceScavenge) {
536 gclog_or_tty->print_cr("{%s %s 0x%x -> 0x%x (%d)}",
537 PSScavenge::should_scavenge(new_obj) ? "copying" : "tenuring",
538 new_obj->blueprint()->internal_name(), o, new_obj, new_obj->size());
539
540 }
541 #endif
542
543 return new_obj;
544 }
545
546 void PSPromotionManager::process_array_chunk(oop old) {
547 assert(PSChunkLargeArrays, "invariant");
548 assert(old->is_objArray(), "invariant");
549 assert(old->is_forwarded(), "invariant");
550
551 #if PS_PM_STATS
552 ++_array_chunks_processed;
553 #endif // PS_PM_STATS
554
555 oop const obj = old->forwardee();
556
557 int start;
558 int const end = arrayOop(old)->length();
559 if (end > (int) _min_array_size_for_chunking) {
560 // we'll chunk more
561 start = end - _array_chunk_size;
562 assert(start > 0, "invariant");
563 arrayOop(old)->set_length(start);
564 push_depth(mask_chunked_array_oop(old));
565 #if PS_PM_STATS
566 ++_masked_pushes;
567 #endif // PS_PM_STATS
568 } else {
569 // this is the final chunk for this array
570 start = 0;
571 int const actual_length = arrayOop(obj)->length();
572 arrayOop(old)->set_length(actual_length);
573 }
574
575 assert(start < end, "invariant");
576 oop* const base = objArrayOop(obj)->base();
577 oop* p = base + start;
578 oop* const chunk_end = base + end;
579 while (p < chunk_end) {
580 if (PSScavenge::should_scavenge(*p)) {
581 claim_or_forward_depth(p);
582 }
583 ++p;
584 }
585 }
586
// Handle a failed promotion: the object stays where it is, forwarded to
// itself, and its contents are pushed for scanning so referenced objects
// are still processed.  Racing threads are resolved with the same
// forwarding CAS used for successful copies.
// NOTE(review): this function is truncated in the listing (cut after
// original line 603).
587 oop PSPromotionManager::oop_promotion_failed(oop obj, markOop obj_mark) {
588 assert(_old_gen_is_full || PromotionFailureALot, "Sanity");
589
590 // Attempt to CAS in the header.
591 // This tests if the header is still the same as when
592 // this started. If it is the same (i.e., no forwarding
593 // pointer has been installed), then this thread owns
594 // it.
// Forwarding the object to itself marks it as "promotion failed in place".
595 if (obj->cas_forward_to(obj, obj_mark)) {
596 // We won any races, we "own" this object.
597 assert(obj == obj->forwardee(), "Sanity");
598
599 if (depth_first()) {
600 obj->push_contents(this);
601 } else {
602 // Don't bother incrementing the age, just push
603 // onto the claimed_stack..
|
1 #ifdef USE_PRAGMA_IDENT_SRC
2 #pragma ident "@(#)psPromotionManager.cpp 1.30 07/09/25 16:47:41 JVM"
3 #endif
4 /*
5 * Copyright 2002-2008 Sun Microsystems, Inc. All Rights Reserved.
6 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
7 *
8 * This code is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License version 2 only, as
10 * published by the Free Software Foundation.
11 *
12 * This code is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 * version 2 for more details (a copy is included in the LICENSE file that
16 * accompanied this code).
17 *
18 * You should have received a copy of the GNU General Public License version
19 * 2 along with this work; if not, write to the Free Software Foundation,
20 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
21 *
22 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23 * CA 95054 USA or visit www.sun.com if you need additional information or
24 * have any questions.
25 *
168 PSPromotionManager* manager = manager_array(i);
169 manager->print_stats(i);
170 }
171 }
172
173 #endif // PS_PM_STATS
174
// Per-GC-thread promotion manager (later revision of the same file).
// Identical to the earlier revision above except that the depth-first
// overflow stack now holds StarTask entries (which can encode either oop*
// or narrowOop* locations) instead of raw oop*.
// NOTE(review): original lines 209-225 are missing from this listing.
175 PSPromotionManager::PSPromotionManager() {
176 ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
177 assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
// Scavenge order is fixed at construction time for this manager.
178 _depth_first = UseDepthFirstScavengeOrder;
179
180 // We set the old lab's start array.
181 _old_lab.set_start_array(old_gen()->start_array());
182
183 uint queue_size;
184 if (depth_first()) {
185 claimed_stack_depth()->initialize();
186 queue_size = claimed_stack_depth()->max_elems();
187 // We want the overflow stack to be permanent
188 _overflow_stack_depth = new (ResourceObj::C_HEAP) GrowableArray<StarTask>(10, true);
// Only one of the two overflow stacks exists per mode; the other stays NULL.
189 _overflow_stack_breadth = NULL;
190 } else {
191 claimed_stack_breadth()->initialize();
192 queue_size = claimed_stack_breadth()->max_elems();
193 // We want the overflow stack to be permanent
194 _overflow_stack_breadth = new (ResourceObj::C_HEAP) GrowableArray<oop>(10, true);
195 _overflow_stack_depth = NULL;
196 }
197
// With a single GC thread (or an explicit target of 0) no one can steal
// from this queue, so always drain it completely.
198 _totally_drain = (ParallelGCThreads == 1) || (GCDrainStackTargetSize == 0);
199 if (_totally_drain) {
200 _target_stack_size = 0;
201 } else {
202 // don't let the target stack size to be more than 1/4 of the entries
203 _target_stack_size = (uint) MIN2((uint) GCDrainStackTargetSize,
204 (uint) (queue_size / 4));
205 }
206
207 _array_chunk_size = ParGCArrayScanChunk;
208 // let's choose 1.5x the chunk size
226 _young_gen_is_full = false;
227
// Start the old-generation lab as an empty (zero-length) region at the top
// of the old space; it is grown when the first promotion needs it.
228 lab_base = old_gen()->object_space()->top();
229 _old_lab.initialize(MemRegion(lab_base, (size_t)0));
230 _old_gen_is_full = false;
231
232 _prefetch_queue.clear();
233
234 #if PS_PM_STATS
// Per-manager statistics counters, compiled in only when PS_PM_STATS is set.
235 _total_pushes = 0;
236 _masked_pushes = 0;
237 _overflow_pushes = 0;
238 _max_overflow_length = 0;
239 _arrays_chunked = 0;
240 _array_chunks_processed = 0;
241 _total_steals = 0;
242 _masked_steals = 0;
243 #endif // PS_PM_STATS
244 }
245
246
// Drain the depth-first stacks (later revision): the popped work items are
// StarTask values rather than raw oop*, so the loop works for both
// compressed and uncompressed references.  Overflow is drained first so
// other threads can steal from the claimed stack; the claimed stack is
// drained fully or only down to _target_stack_size.
// NOTE(review): the closing lines of this function (after original line
// 289) are not visible in this listing.
247 void PSPromotionManager::drain_stacks_depth(bool totally_drain) {
248 assert(depth_first(), "invariant");
249 assert(overflow_stack_depth() != NULL, "invariant");
// A manager constructed with _totally_drain set always drains fully.
250 totally_drain = totally_drain || _totally_drain;
251
252 #ifdef ASSERT
253 ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
254 assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
255 MutableSpace* to_space = heap->young_gen()->to_space();
256 MutableSpace* old_space = heap->old_gen()->object_space();
257 MutableSpace* perm_space = heap->perm_gen()->object_space();
258 #endif /* ASSERT */
259
260 do {
261 StarTask p;
262
263 // Drain overflow stack first, so other threads can steal from
264 // claimed stack while we work.
265 while(!overflow_stack_depth()->is_empty()) {
266 // linux compiler wants different overloaded operator= in taskqueue to
267 // assign to p that the other compilers don't like.
268 StarTask ptr = overflow_stack_depth()->pop();
269 process_popped_location_depth(ptr);
270 }
271
272 if (totally_drain) {
273 while (claimed_stack_depth()->pop_local(p)) {
274 process_popped_location_depth(p);
275 }
276 } else {
// Stop early once the queue has shrunk to the steal target.
277 while (claimed_stack_depth()->size() > _target_stack_size &&
278 claimed_stack_depth()->pop_local(p)) {
279 process_popped_location_depth(p);
280 }
281 }
// Processing popped locations may push new work onto either stack, so
// loop until both are drained to their targets.
282 } while( (totally_drain && claimed_stack_depth()->size() > 0) ||
283 (overflow_stack_depth()->length() > 0) );
284
285 assert(!totally_drain || claimed_stack_empty(), "Sanity");
286 assert(totally_drain ||
287 claimed_stack_depth()->size() <= _target_stack_size,
288 "Sanity");
289 assert(overflow_stack_empty(), "Sanity");
354 if (!_young_lab.is_flushed())
355 _young_lab.flush();
356
357 assert(!_old_lab.is_flushed() || _old_gen_is_full, "Sanity");
358 if (!_old_lab.is_flushed())
359 _old_lab.flush();
360
361 // Let PSScavenge know if we overflowed
362 if (_young_gen_is_full) {
363 PSScavenge::set_survivor_overflow(true);
364 }
365 }
366
367 //
368 // This method is pretty bulky. It would be nice to split it up
369 // into smaller submethods, but we need to be careful not to hurt
370 // performance.
371 //
372
// Copy the live object o into a survivor space (or promote it), returning
// its new location (later revision: should_scavenge now takes the oop's
// address, and the filler/tracing code uses CollectedHeap::fill_with_object
// and portable PTR_FORMAT/SIZE_FORMAT format macros).
// NOTE(review): this listing is missing original lines 395-484 (the actual
// allocation, copy and forwarding-CAS logic).
373 oop PSPromotionManager::copy_to_survivor_space(oop o, bool depth_first) {
374 assert(PSScavenge::should_scavenge(&o), "Sanity");
375
376 oop new_obj = NULL;
377
378 // NOTE! We must be very careful with any methods that access the mark
379 // in o. There may be multiple threads racing on it, and it may be forwarded
380 // at any time. Do not use oop methods for accessing the mark!
381 markOop test_mark = o->mark();
382
383 // The same test as "o->is_forwarded()"
384 if (!test_mark->is_marked()) {
385 bool new_obj_is_tenured = false;
386 size_t new_obj_size = o->size();
387
388 // Find the objects age, MT safe.
// Use the displaced mark (saved header) when one exists, e.g. if the
// object's header is involved in locking.
389 int age = (test_mark->has_displaced_mark_helper() /* o->has_displaced_mark() */) ?
390 test_mark->displaced_mark_helper()->age() : test_mark->age();
391
392 // Try allocating obj in to-space (unless too old)
393 if (age < PSScavenge::tenuring_threshold()) {
394 new_obj = (oop) _young_lab.allocate(new_obj_size);
485 // we'll chunk it
486 #if PS_PM_STATS
487 ++_arrays_chunked;
488 #endif // PS_PM_STATS
// Push a masked (tagged) pointer so the drain loop knows this entry is a
// chunked array rather than a plain location.
489 oop* const masked_o = mask_chunked_array_oop(o);
490 push_depth(masked_o);
491 #if PS_PM_STATS
492 ++_masked_pushes;
493 #endif // PS_PM_STATS
494 } else {
495 // we'll just push its contents
496 new_obj->push_contents(this);
497 }
498 } else {
499 push_breadth(new_obj);
500 }
501 } else {
502 // We lost, someone else "owns" this object
503 guarantee(o->is_forwarded(), "Object must be forwarded if the cas failed.");
504
505 // Try to deallocate the space. If it was directly allocated we cannot
506 // deallocate it, so we have to test. If the deallocation fails,
507 // overwrite with a filler object.
508 if (new_obj_is_tenured) {
509 if (!_old_lab.unallocate_object(new_obj)) {
510 CollectedHeap::fill_with_object((HeapWord*) new_obj, new_obj_size);
511 }
512 } else if (!_young_lab.unallocate_object(new_obj)) {
513 CollectedHeap::fill_with_object((HeapWord*) new_obj, new_obj_size);
514 }
515
516 // don't update this before the unallocation!
517 new_obj = o->forwardee();
518 }
519 } else {
// Already forwarded by some thread (possibly us, earlier): just return
// the forwardee.
520 assert(o->is_forwarded(), "Sanity");
521 new_obj = o->forwardee();
522 }
523
524 #ifdef DEBUG
525 // This code must come after the CAS test, or it will print incorrect
526 // information.
527 if (TraceScavenge) {
528 gclog_or_tty->print_cr("{%s %s " PTR_FORMAT " -> " PTR_FORMAT " (" SIZE_FORMAT ")}",
529 PSScavenge::should_scavenge(&new_obj) ? "copying" : "tenuring",
530 new_obj->blueprint()->internal_name(), o, new_obj, new_obj->size());
531 }
532 #endif
533
534 return new_obj;
535 }
536
537 template <class T> void PSPromotionManager::process_array_chunk_work(
538 oop obj,
539 int start, int end) {
540 assert(start < end, "invariant");
541 T* const base = (T*)objArrayOop(obj)->base();
542 T* p = base + start;
543 T* const chunk_end = base + end;
544 while (p < chunk_end) {
545 if (PSScavenge::should_scavenge(p)) {
546 claim_or_forward_depth(p);
547 }
548 ++p;
549 }
550 }
551
552 void PSPromotionManager::process_array_chunk(oop old) {
553 assert(PSChunkLargeArrays, "invariant");
554 assert(old->is_objArray(), "invariant");
555 assert(old->is_forwarded(), "invariant");
556
557 #if PS_PM_STATS
558 ++_array_chunks_processed;
559 #endif // PS_PM_STATS
560
561 oop const obj = old->forwardee();
562
563 int start;
564 int const end = arrayOop(old)->length();
565 if (end > (int) _min_array_size_for_chunking) {
566 // we'll chunk more
567 start = end - _array_chunk_size;
568 assert(start > 0, "invariant");
569 arrayOop(old)->set_length(start);
570 push_depth(mask_chunked_array_oop(old));
571 #if PS_PM_STATS
572 ++_masked_pushes;
573 #endif // PS_PM_STATS
574 } else {
575 // this is the final chunk for this array
576 start = 0;
577 int const actual_length = arrayOop(obj)->length();
578 arrayOop(old)->set_length(actual_length);
579 }
580
581 if (UseCompressedOops) {
582 process_array_chunk_work<narrowOop>(obj, start, end);
583 } else {
584 process_array_chunk_work<oop>(obj, start, end);
585 }
586 }
587
// Handle a failed promotion: the object stays where it is, forwarded to
// itself, and its contents are pushed for scanning so referenced objects
// are still processed.  Racing threads are resolved with the same
// forwarding CAS used for successful copies.
// NOTE(review): this function is truncated in the listing (cut after
// original line 604).
588 oop PSPromotionManager::oop_promotion_failed(oop obj, markOop obj_mark) {
589 assert(_old_gen_is_full || PromotionFailureALot, "Sanity");
590
591 // Attempt to CAS in the header.
592 // This tests if the header is still the same as when
593 // this started. If it is the same (i.e., no forwarding
594 // pointer has been installed), then this thread owns
595 // it.
// Forwarding the object to itself marks it as "promotion failed in place".
596 if (obj->cas_forward_to(obj, obj_mark)) {
597 // We won any races, we "own" this object.
598 assert(obj == obj->forwardee(), "Sanity");
599
600 if (depth_first()) {
601 obj->push_contents(this);
602 } else {
603 // Don't bother incrementing the age, just push
604 // onto the claimed_stack..
|