hotspot/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.cpp

rev 611 : Merge

*** 1,10 ****
  #ifdef USE_PRAGMA_IDENT_SRC
  #pragma ident "@(#)psCompactionManager.cpp 1.17 06/07/10 23:27:02 JVM"
  #endif
  /*
!  * Copyright 2005-2006 Sun Microsystems, Inc. All Rights Reserved.
   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   *
   * This code is free software; you can redistribute it and/or modify it
   * under the terms of the GNU General Public License version 2 only, as
   * published by the Free Software Foundation.
--- 1,10 ----
  #ifdef USE_PRAGMA_IDENT_SRC
  #pragma ident "@(#)psCompactionManager.cpp 1.17 06/07/10 23:27:02 JVM"
  #endif
  /*
!  * Copyright 2005-2008 Sun Microsystems, Inc. All Rights Reserved.
   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   *
   * This code is free software; you can redistribute it and/or modify it
   * under the terms of the GNU General Public License version 2 only, as
   * published by the Free Software Foundation.
*** 31,41 ****
  PSOldGen*               ParCompactionManager::_old_gen = NULL;
  ParCompactionManager**  ParCompactionManager::_manager_array = NULL;
  OopTaskQueueSet*        ParCompactionManager::_stack_array = NULL;
  ObjectStartArray*       ParCompactionManager::_start_array = NULL;
  ParMarkBitMap*          ParCompactionManager::_mark_bitmap = NULL;
! ChunkTaskQueueSet*      ParCompactionManager::_chunk_array = NULL;
  
  ParCompactionManager::ParCompactionManager() :
      _action(CopyAndUpdate) {
  
    ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
--- 31,41 ----
  PSOldGen*               ParCompactionManager::_old_gen = NULL;
  ParCompactionManager**  ParCompactionManager::_manager_array = NULL;
  OopTaskQueueSet*        ParCompactionManager::_stack_array = NULL;
  ObjectStartArray*       ParCompactionManager::_start_array = NULL;
  ParMarkBitMap*          ParCompactionManager::_mark_bitmap = NULL;
! RegionTaskQueueSet*     ParCompactionManager::_region_array = NULL;
  
  ParCompactionManager::ParCompactionManager() :
      _action(CopyAndUpdate) {
  
    ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
*** 47,63 ****
    marking_stack()->initialize();
  
    // We want the overflow stack to be permanent
    _overflow_stack = new (ResourceObj::C_HEAP) GrowableArray<oop>(10, true);
! #ifdef USE_ChunkTaskQueueWithOverflow
!   chunk_stack()->initialize();
  #else
!   chunk_stack()->initialize();
  
    // We want the overflow stack to be permanent
!   _chunk_overflow_stack = new (ResourceObj::C_HEAP) GrowableArray<size_t>(10, true);
  #endif
  
    // Note that _revisit_klass_stack is allocated out of the
    // C heap (as opposed to out of ResourceArena).
--- 47,63 ----
    marking_stack()->initialize();
  
    // We want the overflow stack to be permanent
    _overflow_stack = new (ResourceObj::C_HEAP) GrowableArray<oop>(10, true);
! #ifdef USE_RegionTaskQueueWithOverflow
!   region_stack()->initialize();
  #else
!   region_stack()->initialize();
  
    // We want the overflow stack to be permanent
!   _region_overflow_stack = new (ResourceObj::C_HEAP) GrowableArray<size_t>(10, true);
  #endif
  
    // Note that _revisit_klass_stack is allocated out of the
    // C heap (as opposed to out of ResourceArena).
*** 87,108 ****
    _manager_array = NEW_C_HEAP_ARRAY(ParCompactionManager*, parallel_gc_threads+1 );
    guarantee(_manager_array != NULL, "Could not initialize promotion manager");
  
    _stack_array = new OopTaskQueueSet(parallel_gc_threads);
    guarantee(_stack_array != NULL, "Count not initialize promotion manager");
!   _chunk_array = new ChunkTaskQueueSet(parallel_gc_threads);
!   guarantee(_chunk_array != NULL, "Count not initialize promotion manager");
  
    // Create and register the ParCompactionManager(s) for the worker threads.
    for(uint i=0; i<parallel_gc_threads; i++) {
      _manager_array[i] = new ParCompactionManager();
      guarantee(_manager_array[i] != NULL, "Could not create ParCompactionManager");
      stack_array()->register_queue(i, _manager_array[i]->marking_stack());
! #ifdef USE_ChunkTaskQueueWithOverflow
!     chunk_array()->register_queue(i, _manager_array[i]->chunk_stack()->task_queue());
  #else
!     chunk_array()->register_queue(i, _manager_array[i]->chunk_stack());
  #endif
    }
  
    // The VMThread gets its own ParCompactionManager, which is not available
    // for work stealing.
--- 87,108 ----
    _manager_array = NEW_C_HEAP_ARRAY(ParCompactionManager*, parallel_gc_threads+1 );
    guarantee(_manager_array != NULL, "Could not initialize promotion manager");
  
    _stack_array = new OopTaskQueueSet(parallel_gc_threads);
    guarantee(_stack_array != NULL, "Count not initialize promotion manager");
!   _region_array = new RegionTaskQueueSet(parallel_gc_threads);
!   guarantee(_region_array != NULL, "Count not initialize promotion manager");
  
    // Create and register the ParCompactionManager(s) for the worker threads.
    for(uint i=0; i<parallel_gc_threads; i++) {
      _manager_array[i] = new ParCompactionManager();
      guarantee(_manager_array[i] != NULL, "Could not create ParCompactionManager");
      stack_array()->register_queue(i, _manager_array[i]->marking_stack());
! #ifdef USE_RegionTaskQueueWithOverflow
!     region_array()->register_queue(i, _manager_array[i]->region_stack()->task_queue());
  #else
!     region_array()->register_queue(i, _manager_array[i]->region_stack());
  #endif
    }
  
    // The VMThread gets its own ParCompactionManager, which is not available
    // for work stealing.
*** 154,188 ****
    // Should not be used in the parallel case
    ShouldNotReachHere();
    return NULL;
  }
  
! // Save chunk on a stack
! void ParCompactionManager::save_for_processing(size_t chunk_index) {
  #ifdef ASSERT
    const ParallelCompactData& sd = PSParallelCompact::summary_data();
!   ParallelCompactData::ChunkData* const chunk_ptr = sd.chunk(chunk_index);
!   assert(chunk_ptr->claimed(), "must be claimed");
!   assert(chunk_ptr->_pushed++ == 0, "should only be pushed once");
  #endif
!   chunk_stack_push(chunk_index);
  }
  
! void ParCompactionManager::chunk_stack_push(size_t chunk_index) {
  
! #ifdef USE_ChunkTaskQueueWithOverflow
!   chunk_stack()->save(chunk_index);
  #else
!   if(!chunk_stack()->push(chunk_index)) {
!     chunk_overflow_stack()->push(chunk_index);
    }
  #endif
  }
  
! bool ParCompactionManager::retrieve_for_processing(size_t& chunk_index) {
! #ifdef USE_ChunkTaskQueueWithOverflow
!   return chunk_stack()->retrieve(chunk_index);
  #else
    // Should not be used in the parallel case
    ShouldNotReachHere();
    return false;
  #endif
--- 154,188 ----
    // Should not be used in the parallel case
    ShouldNotReachHere();
    return NULL;
  }
  
! // Save region on a stack
! void ParCompactionManager::save_for_processing(size_t region_index) {
  #ifdef ASSERT
    const ParallelCompactData& sd = PSParallelCompact::summary_data();
!   ParallelCompactData::RegionData* const region_ptr = sd.region(region_index);
!   assert(region_ptr->claimed(), "must be claimed");
!   assert(region_ptr->_pushed++ == 0, "should only be pushed once");
  #endif
!   region_stack_push(region_index);
  }
  
! void ParCompactionManager::region_stack_push(size_t region_index) {
  
! #ifdef USE_RegionTaskQueueWithOverflow
!   region_stack()->save(region_index);
  #else
!   if(!region_stack()->push(region_index)) {
!     region_overflow_stack()->push(region_index);
    }
  #endif
  }
  
! bool ParCompactionManager::retrieve_for_processing(size_t& region_index) {
! #ifdef USE_RegionTaskQueueWithOverflow
!   return region_stack()->retrieve(region_index);
  #else
    // Should not be used in the parallel case
    ShouldNotReachHere();
    return false;
  #endif
*** 231,248 ****
    assert(marking_stack()->size() == 0, "Sanity");
    assert(overflow_stack()->length() == 0, "Sanity");
  }
  
! void ParCompactionManager::drain_chunk_overflow_stack() {
!   size_t chunk_index = (size_t) -1;
!   while(chunk_stack()->retrieve_from_overflow(chunk_index)) {
!     PSParallelCompact::fill_and_update_chunk(this, chunk_index);
    }
  }
  
! void ParCompactionManager::drain_chunk_stacks() {
  #ifdef ASSERT
    ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
    assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
    MutableSpace* to_space = heap->young_gen()->to_space();
    MutableSpace* old_space = heap->old_gen()->object_space();
--- 231,248 ----
    assert(marking_stack()->size() == 0, "Sanity");
    assert(overflow_stack()->length() == 0, "Sanity");
  }
  
! void ParCompactionManager::drain_region_overflow_stack() {
!   size_t region_index = (size_t) -1;
!   while(region_stack()->retrieve_from_overflow(region_index)) {
!     PSParallelCompact::fill_and_update_region(this, region_index);
    }
  }
  
! void ParCompactionManager::drain_region_stacks() {
  #ifdef ASSERT
    ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
    assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
    MutableSpace* to_space = heap->young_gen()->to_space();
    MutableSpace* old_space = heap->old_gen()->object_space();
*** 250,295 ****
  #endif /* ASSERT */
  
  #if 1 // def DO_PARALLEL - the serial code hasn't been updated
    do {
  
! #ifdef USE_ChunkTaskQueueWithOverflow
      // Drain overflow stack first, so other threads can steal from
      // claimed stack while we work.
!     size_t chunk_index = (size_t) -1;
!     while(chunk_stack()->retrieve_from_overflow(chunk_index)) {
!       PSParallelCompact::fill_and_update_chunk(this, chunk_index);
      }
  
!     while (chunk_stack()->retrieve_from_stealable_queue(chunk_index)) {
!       PSParallelCompact::fill_and_update_chunk(this, chunk_index);
      }
!   } while (!chunk_stack()->is_empty());
  #else
      // Drain overflow stack first, so other threads can steal from
      // claimed stack while we work.
!     while(!chunk_overflow_stack()->is_empty()) {
!       size_t chunk_index = chunk_overflow_stack()->pop();
!       PSParallelCompact::fill_and_update_chunk(this, chunk_index);
      }
  
!     size_t chunk_index = -1; // obj is a reference!!!
!     while (chunk_stack()->pop_local(chunk_index)) {
        // It would be nice to assert about the type of objects we might
        // pop, but they can come from anywhere, unfortunately.
!       PSParallelCompact::fill_and_update_chunk(this, chunk_index);
      }
!   } while((chunk_stack()->size() != 0) ||
!           (chunk_overflow_stack()->length() != 0));
  #endif
  
! #ifdef USE_ChunkTaskQueueWithOverflow
!   assert(chunk_stack()->is_empty(), "Sanity");
  #else
!   assert(chunk_stack()->size() == 0, "Sanity");
!   assert(chunk_overflow_stack()->length() == 0, "Sanity");
  #endif
  #else
    oop obj;
    while (obj = retrieve_for_scanning()) {
      obj->follow_contents(this);
--- 250,295 ----
  #endif /* ASSERT */
  
  #if 1 // def DO_PARALLEL - the serial code hasn't been updated
    do {
  
! #ifdef USE_RegionTaskQueueWithOverflow
      // Drain overflow stack first, so other threads can steal from
      // claimed stack while we work.
!     size_t region_index = (size_t) -1;
!     while(region_stack()->retrieve_from_overflow(region_index)) {
!       PSParallelCompact::fill_and_update_region(this, region_index);
      }
  
!     while (region_stack()->retrieve_from_stealable_queue(region_index)) {
!       PSParallelCompact::fill_and_update_region(this, region_index);
      }
!   } while (!region_stack()->is_empty());
  #else
      // Drain overflow stack first, so other threads can steal from
      // claimed stack while we work.
!     while(!region_overflow_stack()->is_empty()) {
!       size_t region_index = region_overflow_stack()->pop();
!       PSParallelCompact::fill_and_update_region(this, region_index);
      }
  
!     size_t region_index = -1; // obj is a reference!!!
!     while (region_stack()->pop_local(region_index)) {
        // It would be nice to assert about the type of objects we might
        // pop, but they can come from anywhere, unfortunately.
!       PSParallelCompact::fill_and_update_region(this, region_index);
      }
!   } while((region_stack()->size() != 0) ||
!           (region_overflow_stack()->length() != 0));
  #endif
  
! #ifdef USE_RegionTaskQueueWithOverflow
!   assert(region_stack()->is_empty(), "Sanity");
  #else
!   assert(region_stack()->size() == 0, "Sanity");
!   assert(region_overflow_stack()->length() == 0, "Sanity");
  #endif
  #else
    oop obj;
    while (obj = retrieve_for_scanning()) {
      obj->follow_contents(this);