// NOTE(review): interior fragment of the CMS generation constructor — the
// enclosing definition opens above this view, and lines 205-206 are the tail
// of a NOT_PRODUCT(...) block whose opening is not visible here.  The leading
// "205"-style numbers are pasted diff line numbers, not code.
205 _numWordsAllocated = 0;
206 )
207
// Create the free-list space backing this generation over [bottom, end).
208 _cmsSpace = new CompactibleFreeListSpace(_bts, MemRegion(bottom, end),
209 use_adaptive_freelists,
210 dictionaryChoice);
// Debug-only global handle to the space (compiled out of product builds).
211 NOT_PRODUCT(debug_cms_space = _cmsSpace;)
212 if (_cmsSpace == NULL) {
213 vm_exit_during_initialization(
214 "CompactibleFreeListSpace allocation failure");
215 }
// Back-pointer from the space to its owning generation.
216 _cmsSpace->_gen = this;
217
218 _gc_stats = new CMSGCStats();
219
// Verify the assumption that FreeChunk::_prev and OopDesc::_klass
// offsets match. The ability to tell free chunks from objects
// depends on this property.
220 // Verify the assumption that FreeChunk::_prev and OopDesc::_klass
221 // offsets match. The ability to tell free chunks from objects
222 // depends on this property.
223 debug_only(
224 FreeChunk* junk = NULL;
// The assert compares only the computed field addresses of a NULL pointer;
// nothing is dereferenced.  The check is skipped when UseCompressedOops is
// set, since the compressed layout changes the _klass offset.
225 assert(UseCompressedOops ||
226 junk->prev_addr() == (void*)(oop(junk)->klass_addr()),
227 "Offset of FreeChunk::_prev within FreeChunk must match"
228 " that of OopDesc::_klass within OopDesc");
229 )
// One CMSParGCThreadState per parallel GC worker; allocated from C heap and
// tagged mtGC for native-memory tracking.
230 if (CollectedHeap::use_parallel_gc_threads()) {
231 typedef CMSParGCThreadState* CMSParGCThreadStatePtr;
232 _par_gc_thread_states =
233 NEW_C_HEAP_ARRAY(CMSParGCThreadStatePtr, ParallelGCThreads, mtGC);
234 if (_par_gc_thread_states == NULL) {
235 vm_exit_during_initialization("Could not allocate par gc structs");
236 }
237 for (uint i = 0; i < ParallelGCThreads; i++) {
238 _par_gc_thread_states[i] = new CMSParGCThreadState(cmsSpace());
239 if (_par_gc_thread_states[i] == NULL) {
240 vm_exit_during_initialization("Could not allocate par gc structs");
241 }
242 }
243 } else {
244 _par_gc_thread_states = NULL;
245 }
|
// NOTE(review): second copy of the same constructor fragment ("after"
// version of a pasted diff).  The only code difference from the first copy
// is the assert guard below: UseCompressedOops was replaced by
// UseCompressedKlassPointers, matching the HotSpot change that decoupled
// compressed klass pointers from compressed oops.  The leading "205"-style
// numbers are pasted diff line numbers, not code.
205 _numWordsAllocated = 0;
206 )
207
// Create the free-list space backing this generation over [bottom, end).
208 _cmsSpace = new CompactibleFreeListSpace(_bts, MemRegion(bottom, end),
209 use_adaptive_freelists,
210 dictionaryChoice);
// Debug-only global handle to the space (compiled out of product builds).
211 NOT_PRODUCT(debug_cms_space = _cmsSpace;)
212 if (_cmsSpace == NULL) {
213 vm_exit_during_initialization(
214 "CompactibleFreeListSpace allocation failure");
215 }
// Back-pointer from the space to its owning generation.
216 _cmsSpace->_gen = this;
217
218 _gc_stats = new CMSGCStats();
219
220 // Verify the assumption that FreeChunk::_prev and OopDesc::_klass
221 // offsets match. The ability to tell free chunks from objects
222 // depends on this property.
223 debug_only(
224 FreeChunk* junk = NULL;
// The assert compares only the computed field addresses of a NULL pointer;
// nothing is dereferenced.  Skipped when klass pointers are compressed,
// since that layout changes the _klass offset within OopDesc.
225 assert(UseCompressedKlassPointers ||
226 junk->prev_addr() == (void*)(oop(junk)->klass_addr()),
227 "Offset of FreeChunk::_prev within FreeChunk must match"
228 " that of OopDesc::_klass within OopDesc");
229 )
// One CMSParGCThreadState per parallel GC worker; allocated from C heap and
// tagged mtGC for native-memory tracking.
230 if (CollectedHeap::use_parallel_gc_threads()) {
231 typedef CMSParGCThreadState* CMSParGCThreadStatePtr;
232 _par_gc_thread_states =
233 NEW_C_HEAP_ARRAY(CMSParGCThreadStatePtr, ParallelGCThreads, mtGC);
234 if (_par_gc_thread_states == NULL) {
235 vm_exit_during_initialization("Could not allocate par gc structs");
236 }
237 for (uint i = 0; i < ParallelGCThreads; i++) {
238 _par_gc_thread_states[i] = new CMSParGCThreadState(cmsSpace());
239 if (_par_gc_thread_states[i] == NULL) {
240 vm_exit_during_initialization("Could not allocate par gc structs");
241 }
242 }
243 } else {
244 _par_gc_thread_states = NULL;
245 }
|