src/share/vm/gc/g1/g1ConcurrentMark.cpp

 224   MutexLockerEx x(MarkStackChunkList_lock, Mutex::_no_safepoint_check_flag);
 225   add_chunk_to_list(&_chunk_list, elem);
 226   _chunks_in_chunk_list++;
 227 }
 228 
 229 void G1CMMarkStack::add_chunk_to_free_list(OopChunk* elem) {
 230   MutexLockerEx x(MarkStackFreeList_lock, Mutex::_no_safepoint_check_flag);
 231   add_chunk_to_list(&_free_list, elem);
 232 }
 233 
 234 G1CMMarkStack::OopChunk* G1CMMarkStack::remove_chunk_from_list(OopChunk* volatile* list) {
 235   OopChunk* result = *list;
 236   if (result != NULL) {
 237     *list = (*list)->next;
 238   }
 239   return result;
 240 }
 241 
 242 G1CMMarkStack::OopChunk* G1CMMarkStack::remove_chunk_from_chunk_list() {
 243   MutexLockerEx x(MarkStackChunkList_lock, Mutex::_no_safepoint_check_flag);
 244   _chunks_in_chunk_list--;
 245   return remove_chunk_from_list(&_chunk_list);
 246 }
 247 
 248 G1CMMarkStack::OopChunk* G1CMMarkStack::remove_chunk_from_free_list() {
 249   MutexLockerEx x(MarkStackFreeList_lock, Mutex::_no_safepoint_check_flag);
 250   return remove_chunk_from_list(&_free_list);
 251 }
 252 
 253 G1CMMarkStack::OopChunk* G1CMMarkStack::allocate_new_chunk() {
 254   // This dirty read of _hwm is okay because we only ever increase the _hwm in parallel code.
 255   // Further this limits _hwm to a value of _chunk_capacity + #threads, avoiding
 256   // wraparound of _hwm.
 257   if (_hwm >= _chunk_capacity) {
 258     return NULL;
 259   }
 260 
 261   size_t cur_idx = Atomic::add(1, &_hwm) - 1;
 262   if (cur_idx >= _chunk_capacity) {
 263     return NULL;
 264   }
 265 
 266   OopChunk* result = ::new (&_base[cur_idx]) OopChunk;
 267   result->next = NULL;
 268   return result;
 269 }
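
The comment in allocate_new_chunk() above leans on two properties: _hwm only ever grows, and every thread that passes the racy pre-check bumps it exactly once, so the value can overshoot _chunk_capacity by at most the number of concurrent allocators. A minimal standalone sketch of that claim-by-index pattern, using std::atomic in place of HotSpot's Atomic wrapper (all names here are illustrative, not part of the patch):

#include <atomic>
#include <cstddef>
#include <new>

struct Chunk { Chunk* next; };

static const size_t kCapacity = 1024;      // stands in for _chunk_capacity
static Chunk g_backing[kCapacity];         // stands in for _base
static std::atomic<size_t> g_hwm(0);       // stands in for _hwm

Chunk* allocate_new_chunk_sketch() {
  // Dirty read: g_hwm never decreases, so this early-out only needs to be
  // approximately right; it bounds the overshoot below to ~capacity + #threads.
  if (g_hwm.load(std::memory_order_relaxed) >= kCapacity) {
    return NULL;
  }
  // fetch_add returns the previous value, so no "- 1" is needed here
  // (HotSpot's Atomic::add returns the new value, hence the "- 1" above).
  size_t cur_idx = g_hwm.fetch_add(1, std::memory_order_relaxed);
  if (cur_idx >= kCapacity) {
    return NULL;                           // lost the race past the end
  }
  Chunk* result = ::new (&g_backing[cur_idx]) Chunk;
  result->next = NULL;
  return result;
}

Each index below kCapacity is handed to exactly one thread, which is why the placement-new on g_backing[cur_idx] cannot race with another allocator.
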
 270 
 271 bool G1CMMarkStack::par_push_chunk(oop* ptr_arr) {
 272   // Get a new chunk.
 273   OopChunk* new_chunk = remove_chunk_from_list(&_free_list);
 274 
 275   if (new_chunk == NULL) {
 276     // Did not get a chunk from the free list. Allocate from backing memory.
 277     new_chunk = allocate_new_chunk();
 278   }
 279 
 280   if (new_chunk == NULL) {
 281     _out_of_memory = true;
 282     return false;
 283   }
 284 
 285   Copy::conjoint_oops_atomic(ptr_arr, new_chunk->data, OopsPerChunk);
 286 
 287   add_chunk_to_list(&_chunk_list, new_chunk);
 288   Atomic::inc(&_chunks_in_chunk_list);
 289 
 290   return true;
 291 }
 292 
 293 bool G1CMMarkStack::par_pop_chunk(oop* ptr_arr) {
 294   OopChunk* cur = remove_chunk_from_list(&_chunk_list);
 295 
 296   if (cur == NULL) {
 297     return false;
 298   }
 299 
 300   Atomic::dec(&_chunks_in_chunk_list);
 301 
 302   Copy::conjoint_oops_atomic(cur->data, ptr_arr, OopsPerChunk);
 303 
 304   add_chunk_to_list(&_free_list, cur);
 305   return true;
 306 }
 307 
 308 void G1CMMarkStack::set_empty() {
 309   _chunks_in_chunk_list = 0;
 310   _hwm = 0;
 311   clear_out_of_memory();
 312   _chunk_list = NULL;
 313   _free_list = NULL;
 314 }
 315 
 316 G1CMRootRegions::G1CMRootRegions() :
 317   _cm(NULL), _scan_in_progress(false),
 318   _should_abort(false), _claimed_survivor_index(0) { }
 319 
 320 void G1CMRootRegions::init(const G1SurvivorRegions* survivors, G1ConcurrentMark* cm) {
 321   _survivors = survivors;
 322   _cm = cm;
 323 }
 324 




 224   MutexLockerEx x(MarkStackChunkList_lock, Mutex::_no_safepoint_check_flag);
 225   add_chunk_to_list(&_chunk_list, elem);
 226   _chunks_in_chunk_list++;
 227 }
 228 
 229 void G1CMMarkStack::add_chunk_to_free_list(OopChunk* elem) {
 230   MutexLockerEx x(MarkStackFreeList_lock, Mutex::_no_safepoint_check_flag);
 231   add_chunk_to_list(&_free_list, elem);
 232 }
 233 
 234 G1CMMarkStack::OopChunk* G1CMMarkStack::remove_chunk_from_list(OopChunk* volatile* list) {
 235   OopChunk* result = *list;
 236   if (result != NULL) {
 237     *list = (*list)->next;
 238   }
 239   return result;
 240 }
 241 
 242 G1CMMarkStack::OopChunk* G1CMMarkStack::remove_chunk_from_chunk_list() {
 243   MutexLockerEx x(MarkStackChunkList_lock, Mutex::_no_safepoint_check_flag);
 244   OopChunk* result = remove_chunk_from_list(&_chunk_list);
 245   if (result != NULL) {
 246     _chunks_in_chunk_list--;
 247   }
 248   return result;
 249 }
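
The reordering here is not cosmetic: in the version above, _chunks_in_chunk_list was decremented before checking whether the list actually had a chunk, so a pop from an empty chunk list still lowered the counter. Assuming the counter is an unsigned size_t (an assumption; its declaration is not in this hunk), that stray decrement wraps around rather than going negative. A tiny standalone illustration of that failure mode, purely for exposition:

#include <cstddef>
#include <cstdio>

int main() {
  size_t chunks_in_chunk_list = 0;   // chunk list is empty
  chunks_in_chunk_list--;            // unconditional decrement, as in the old code
  // On a 64-bit build this prints 18446744073709551615: the count has wrapped.
  printf("%zu\n", chunks_in_chunk_list);
  return 0;
}
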
 250 
 251 G1CMMarkStack::OopChunk* G1CMMarkStack::remove_chunk_from_free_list() {
 252   MutexLockerEx x(MarkStackFreeList_lock, Mutex::_no_safepoint_check_flag);
 253   return remove_chunk_from_list(&_free_list);
 254 }
 255 
 256 G1CMMarkStack::OopChunk* G1CMMarkStack::allocate_new_chunk() {
 257   // This dirty read of _hwm is okay because we only ever increase the _hwm in parallel code.
 258   // Further this limits _hwm to a value of _chunk_capacity + #threads, avoiding
 259   // wraparound of _hwm.
 260   if (_hwm >= _chunk_capacity) {
 261     return NULL;
 262   }
 263 
 264   size_t cur_idx = Atomic::add(1, &_hwm) - 1;
 265   if (cur_idx >= _chunk_capacity) {
 266     return NULL;
 267   }
 268 
 269   OopChunk* result = ::new (&_base[cur_idx]) OopChunk;
 270   result->next = NULL;
 271   return result;
 272 }
 273 
 274 bool G1CMMarkStack::par_push_chunk(oop* ptr_arr) {
 275   // Get a new chunk.
 276   OopChunk* new_chunk = remove_chunk_from_free_list();
 277 
 278   if (new_chunk == NULL) {
 279     // Did not get a chunk from the free list. Allocate from backing memory.
 280     new_chunk = allocate_new_chunk();
 281   }
 282 
 283   if (new_chunk == NULL) {
 284     _out_of_memory = true;
 285     return false;
 286   }
 287 
 288   Copy::conjoint_oops_atomic(ptr_arr, new_chunk->data, OopsPerChunk);
 289 
 290   add_chunk_to_chunk_list(new_chunk);
 291 
 292   return true;
 293 }
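
par_push_chunk() takes a caller-filled buffer of OopsPerChunk entries, grabs a chunk (recycled from the free list if possible, otherwise freshly claimed from the backing array), copies the buffer into it and publishes it on the chunk list; a false return means the backing array is exhausted and _out_of_memory has been set. A hypothetical caller, not taken from the patch, might look roughly like this, with handle_mark_stack_overflow() standing in for whatever the real caller does when the push fails:

// Hypothetical flush of a task-local buffer into the global mark stack.
// Assumes 'buffer' holds exactly G1CMMarkStack::OopsPerChunk entries.
bool flush_local_buffer(G1CMMarkStack* global_stack, oop* buffer) {
  if (!global_stack->par_push_chunk(buffer)) {
    // No chunk could be recycled or allocated: record the overflow so that
    // marking can react (e.g. by restarting with a bigger mark stack).
    handle_mark_stack_overflow();
    return false;
  }
  return true;
}
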
 294 
 295 bool G1CMMarkStack::par_pop_chunk(oop* ptr_arr) {
 296   OopChunk* cur = remove_chunk_from_chunk_list();
 297 
 298   if (cur == NULL) {
 299     return false;
 300   }
 301 
 302   Copy::conjoint_oops_atomic(cur->data, ptr_arr, OopsPerChunk);
 303 
 304   add_chunk_to_free_list(cur);
 305   return true;
 306 }
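
par_pop_chunk() is the mirror image: it copies one chunk's worth of entries into the caller's buffer and puts the now-empty chunk back on the free list. A hypothetical drain loop (again not part of the patch; process_entry() stands in for the real scanning code, and the early break assumes the pusher NULL-terminates a partially filled chunk, which is how the surrounding marking code fills its buffers):

// Hypothetical drain of the global mark stack into a task-local buffer.
void drain_global_stack(G1CMMarkStack* global_stack) {
  oop buffer[G1CMMarkStack::OopsPerChunk];
  while (global_stack->par_pop_chunk(buffer)) {
    for (size_t i = 0; i < G1CMMarkStack::OopsPerChunk; i++) {
      if (buffer[i] == NULL) {
        break;                      // partially filled chunk: rest is empty
      }
      process_entry(buffer[i]);     // stand-in for the actual object scanning
    }
  }
}
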
 307 
 308 void G1CMMarkStack::set_empty() {
 309   _chunks_in_chunk_list = 0;
 310   _hwm = 0;
 311   clear_out_of_memory();
 312   _chunk_list = NULL;
 313   _free_list = NULL;
 314 }
 315 
 316 G1CMRootRegions::G1CMRootRegions() :
 317   _cm(NULL), _scan_in_progress(false),
 318   _should_abort(false), _claimed_survivor_index(0) { }
 319 
 320 void G1CMRootRegions::init(const G1SurvivorRegions* survivors, G1ConcurrentMark* cm) {
 321   _survivors = survivors;
 322   _cm = cm;
 323 }
 324 

