src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp

rev 5190 : 8015107: NPG: Use consistent naming for metaspace concepts


 213     _numWordsAllocated = 0;
 214   )
 215 
 216   _cmsSpace = new CompactibleFreeListSpace(_bts, MemRegion(bottom, end),
 217                                            use_adaptive_freelists,
 218                                            dictionaryChoice);
 219   NOT_PRODUCT(debug_cms_space = _cmsSpace;)
 220   if (_cmsSpace == NULL) {
 221     vm_exit_during_initialization(
 222       "CompactibleFreeListSpace allocation failure");
 223   }
 224   _cmsSpace->_gen = this;
 225 
 226   _gc_stats = new CMSGCStats();
 227 
 228   // Verify the assumption that FreeChunk::_prev and OopDesc::_klass
 229   // offsets match. The ability to tell free chunks from objects
 230   // depends on this property.
 231   debug_only(
 232     FreeChunk* junk = NULL;
- 233     assert(UseCompressedKlassPointers ||
+ 233     assert(UseCompressedClassPointers ||
 234            junk->prev_addr() == (void*)(oop(junk)->klass_addr()),
 235            "Offset of FreeChunk::_prev within FreeChunk must match"
 236            " that of OopDesc::_klass within OopDesc");
 237   )
 238   if (CollectedHeap::use_parallel_gc_threads()) {
 239     typedef CMSParGCThreadState* CMSParGCThreadStatePtr;
 240     _par_gc_thread_states =
 241       NEW_C_HEAP_ARRAY(CMSParGCThreadStatePtr, ParallelGCThreads, mtGC);
 242     if (_par_gc_thread_states == NULL) {
 243       vm_exit_during_initialization("Could not allocate par gc structs");
 244     }
 245     for (uint i = 0; i < ParallelGCThreads; i++) {
 246       _par_gc_thread_states[i] = new CMSParGCThreadState(cmsSpace());
 247       if (_par_gc_thread_states[i] == NULL) {
 248         vm_exit_during_initialization("Could not allocate par gc structs");
 249       }
 250     }
 251   } else {
 252     _par_gc_thread_states = NULL;
 253   }
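
The debug_only assertion above guards the invariant the comment describes: unless compressed class pointers are in use, FreeChunk::_prev occupies the same word as OopDesc::_klass, so CMS can tag a block as free through that word. Since a properly aligned Klass* never has its low bit set, low-bit tagging keeps free chunks and live objects distinguishable from nothing more than a raw block address. A minimal standalone sketch of that low-bit-tagging idea (hypothetical types, not HotSpot code):

  #include <cassert>
  #include <cstdint>
  #include <cstdio>

  // Two views of the same header word: an allocated object keeps a class
  // pointer there; a free chunk reuses it as a prev-link with the low bit
  // set as the "free" tag. Aligned pointers never have the low bit set,
  // so the two states cannot be confused.
  struct BlockWord {
    uintptr_t bits;  // plays the role of OopDesc::_klass / FreeChunk::_prev

    void set_klass(void* k)    { bits = (uintptr_t)k; }           // allocated
    void mark_free(void* prev) { bits = (uintptr_t)prev | 0x1; }  // free chunk
    bool looks_free() const    { return (bits & 0x1) != 0; }
    void* prev() const         { return (void*)(bits & ~(uintptr_t)0x1); }
  };

  int main() {
    static int stand_in_klass;    // stand-in for a Klass*; its alignment
    BlockWord w;                  // keeps the low bit of the address clear
    w.set_klass(&stand_in_klass);
    assert(!w.looks_free());      // allocated object: low bit clear
    w.mark_free(nullptr);
    assert(w.looks_free());       // free chunk: low bit set
    printf("free=%d prev=%p\n", (int)w.looks_free(), w.prev());
    return 0;
  }

If the two offsets ever diverged, the tag would land in a different word than the klass field and free blocks could masquerade as live objects; that is exactly what the assert rules out.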


1390      obj_ptr = expand_and_par_lab_allocate(ps, alloc_sz);
1391      if (obj_ptr == NULL) {
1392        return NULL;
1393      }
1394   }
1395   oop obj = oop(obj_ptr);
1396   OrderAccess::storestore();
1397   assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
1398   assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
1399   // IMPORTANT: See note on object initialization for CMS above.
1400   // Otherwise, copy the object.  Here we must be careful to insert the
1401   // klass pointer last, since this marks the block as an allocated object.
 1402   // (With compressed oops it is the mark word, not the klass pointer, that marks the block as allocated.)
1403   HeapWord* old_ptr = (HeapWord*)old;
1404   // Restore the mark word copied above.
1405   obj->set_mark(m);
1406   assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
1407   assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
1408   OrderAccess::storestore();
1409 
-1410   if (UseCompressedKlassPointers) {
+1410   if (UseCompressedClassPointers) {
1411     // Copy gap missed by (aligned) header size calculation below
1412     obj->set_klass_gap(old->klass_gap());
1413   }
1414   if (word_sz > (size_t)oopDesc::header_size()) {
1415     Copy::aligned_disjoint_words(old_ptr + oopDesc::header_size(),
1416                                  obj_ptr + oopDesc::header_size(),
1417                                  word_sz - oopDesc::header_size());
1418   }
1419 
1420   // Now we can track the promoted object, if necessary.  We take care
1421   // to delay the transition from uninitialized to full object
1422   // (i.e., insertion of klass pointer) until after, so that it
1423   // atomically becomes a promoted object.
1424   if (promoInfo->tracking()) {
1425     promoInfo->track((PromotedObject*)obj, old->klass());
1426   }
1427   assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
1428   assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
1429   assert(old->is_oop(), "Will use and dereference old klass ptr below");
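
The promotion path above follows a safe-publication discipline: restore the mark word and copy the body first, fence with OrderAccess::storestore(), and install the klass pointer last (plus the klass gap when compressed class pointers are in use, since the aligned-header-size body copy skips it). A concurrent marker that reads a non-NULL klass is therefore guaranteed to see a completely copied object, while klass_or_null() == NULL tells it the block is not yet an object. A minimal sketch of the same pattern, with a C++11 release/acquire pair standing in for OrderAccess::storestore() (hypothetical names, not HotSpot code):

  #include <atomic>
  #include <cstddef>
  #include <cstdio>

  // Publication pattern from the promotion path above: plain stores fill
  // the body, then a release store installs the klass word last.
  struct Block {
    std::atomic<void*> klass{nullptr};  // installed last; NULL == uninitialized
    size_t payload[4];
  };

  // Writer (promoting thread): copy the body, then publish the klass. Any
  // reader that sees a non-null klass also sees the completed body.
  void publish(Block* b, const size_t* src, size_t n, void* k) {
    for (size_t i = 0; i < n; i++) {
      b->payload[i] = src[i];
    }
    b->klass.store(k, std::memory_order_release);
  }

  // Reader (concurrent marker): NULL mirrors the klass_or_null() == NULL
  // checks above -- "no object here yet, skip it for now".
  bool try_scan(const Block* b) {
    return b->klass.load(std::memory_order_acquire) != nullptr;
  }

  int main() {
    static int stand_in_klass;
    size_t src[4] = {1, 2, 3, 4};
    Block b;
    printf("before publish: scannable=%d\n", (int)try_scan(&b));
    publish(&b, src, 4, &stand_in_klass);
    printf("after publish:  scannable=%d\n", (int)try_scan(&b));
    return 0;
  }

Delaying the klass store is what makes promotion atomic from the marker's point of view: at no instant is the block anything other than either "uninitialized" or a fully formed object.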
1430 



