@@ -355,40 +355,42 @@
 
   // CMSIndexedFreeListReplenish should be at least 1
   CMSIndexedFreeListReplenish = MAX2((uintx)1, CMSIndexedFreeListReplenish);
   _promoInfo.setSpace(this);
   if (UseCMSBestFit) {
     _fitStrategy = FreeBlockBestFitFirst;
   } else {
     _fitStrategy = FreeBlockStrategyNone;
   }
   check_free_list_consistency();
 
   // Initialize locks for parallel case.
   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
     _indexedFreeListParLocks[i] = new Mutex(Mutex::leaf - 1, // == ExpandHeap_lock - 1
                                             "a freelist par lock", true, Mutex::_safepoint_check_never);
     DEBUG_ONLY(
       _indexedFreeList[i].set_protecting_lock(_indexedFreeListParLocks[i]);
     )
   }
   _dictionary->set_par_lock(&_parDictionaryAllocLock);
+
+  _used_stable = 0;
 }
 
 // Like CompactibleSpace forward() but always calls cross_threshold() to
 // update the block offset table. Removed initialize_threshold call because
 // CFLS does not use a block offset array for contiguous spaces.
 HeapWord* CompactibleFreeListSpace::forward(oop q, size_t size,
                                             CompactPoint* cp, HeapWord* compact_top) {
   // q is alive
   // First check if we should switch compaction space
   assert(this == cp->space, "'this' should be current compaction space.");
   size_t compaction_max_size = pointer_delta(end(), compact_top);
   assert(adjustObjectSize(size) == cp->space->adjust_object_size_v(size),
          "virtual adjustObjectSize_v() method is not correct");
   size_t adjusted_size = adjustObjectSize(size);
   assert(compaction_max_size >= MinChunkSize || compaction_max_size == 0,
          "no small fragments allowed");
   assert(minimum_free_block_size() == MinChunkSize,
          "for de-virtualized reference below");
   // Can't leave a nonzero size, residual fragment smaller than MinChunkSize
   if (adjusted_size + MinChunkSize > compaction_max_size &&
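Note on the hunk above: the tail of the constructor gives every indexed free list its own par lock (one Mutex per small chunk size, stepping by IndexSetStride), so parallel GC threads taking chunks of different sizes serialize per size class rather than on one global lock; the new _used_stable field introduced by this change is zeroed here before first use. A minimal sketch of that per-size lock striping, with std::mutex standing in for HotSpot's Mutex and all names and sizes illustrative:

    #include <cassert>
    #include <cstddef>
    #include <mutex>

    const size_t kIndexSetSize = 257;  // illustrative; one list per small size in words

    struct ChunkSketch {
      ChunkSketch* next;
    };

    // One lock per size class: threads taking chunks of different sizes
    // never contend; only same-size takers serialize on a given lock.
    static std::mutex   par_lock[kIndexSetSize];
    static ChunkSketch* free_list_head[kIndexSetSize];

    ChunkSketch* take_chunk(size_t words) {
      assert(words < kIndexSetSize);
      std::lock_guard<std::mutex> guard(par_lock[words]);
      ChunkSketch* chunk = free_list_head[words];
      if (chunk != nullptr) {
        free_list_head[words] = chunk->next;  // pop the list head
      }
      return chunk;  // nullptr: caller falls back to larger chunks
    }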
@@ -560,40 +562,48 @@
 
 size_t CompactibleFreeListSpace::totalCount() {
   size_t num = totalCountInIndexedFreeLists();
   num += dictionary()->total_count();
   if (_smallLinearAllocBlock._word_size != 0) {
     num++;
   }
   return num;
 }
 #endif
 
 bool CompactibleFreeListSpace::is_free_block(const HeapWord* p) const {
   FreeChunk* fc = (FreeChunk*) p;
   return fc->is_free();
 }
 
 size_t CompactibleFreeListSpace::used() const {
   return capacity() - free();
 }
 
+size_t CompactibleFreeListSpace::used_stable() const {
+  return _used_stable;
+}
+
+void CompactibleFreeListSpace::recalculate_used_stable() {
+  _used_stable = used();
+}
+
 size_t CompactibleFreeListSpace::free() const {
   // "MT-safe, but not MT-precise"(TM), if you will: i.e.
   // if you do this while the structures are in flux you
   // may get an approximate answer only; for instance
   // because there is concurrent allocation either
   // directly by mutators or for promotion during a GC.
   // It's "MT-safe", however, in the sense that you are guaranteed
   // not to crash and burn, for instance, because of walking
   // pointers that could disappear as you were walking them.
   // The approximation is because the various components
   // that are read below are not read atomically (and
   // further the computation of totalSizeInIndexedFreeLists()
   // is itself a non-atomic computation. The normal use of
   // this is during a resize operation at the end of GC
   // and at that time you are guaranteed to get the
   // correct actual value. However, for instance, this is
   // also read completely asynchronously by the "perf-sampler"
   // that supports jvmstat, and you are apt to see the values
   // flicker in such cases.
   assert(_dictionary != NULL, "No _dictionary?");
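The comment in free() above is the motivation for the added used_stable()/recalculate_used_stable() pair: used() is capacity() minus a free() value summed from several independently updated components, so a fully asynchronous reader such as the jvmstat perf sampler can watch the total flicker. The patch instead publishes a snapshot that is recomputed only at well-defined, locked points, and stable-value consumers read that. A minimal sketch of the pattern, using std::atomic/std::mutex as stand-ins and illustrative names throughout:

    #include <atomic>
    #include <cstddef>
    #include <mutex>

    class SpaceSketch {
     public:
      // Racy by design: two independent loads, so a concurrent move of a
      // chunk between the components makes the sum (and used()) flicker.
      size_t free_racy() const {
        return _free_in_lists.load(std::memory_order_relaxed) +
               _free_in_dict.load(std::memory_order_relaxed);
      }
      size_t used() const { return _capacity - free_racy(); }

      // Stable: the last snapshot published under the allocation lock.
      size_t used_stable() const {
        return _used_stable.load(std::memory_order_relaxed);
      }

      void allocate_from_lists(size_t words) {
        std::lock_guard<std::mutex> guard(_alloc_lock);
        _free_in_lists.fetch_sub(words, std::memory_order_relaxed);
        recalculate_used_stable();  // refresh the snapshot after mutating
      }

     private:
      void recalculate_used_stable() {
        _used_stable.store(used(), std::memory_order_relaxed);
      }

      std::mutex _alloc_lock;
      size_t _capacity = 8192;
      std::atomic<size_t> _free_in_lists{4096};
      std::atomic<size_t> _free_in_dict{4096};
      std::atomic<size_t> _used_stable{0};
    };

A sampler thread that calls used_stable() sees a value that is at worst slightly stale but never torn, which is enough for monitoring.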
@@ -1357,22 +1367,25 @@
 
   res = allocate_adaptive_freelists(size);
 
   if (res != NULL) {
     // check that res does lie in this space!
     assert(is_in_reserved(res), "Not in this space!");
     assert(is_aligned((void*)res), "alignment check");
 
     FreeChunk* fc = (FreeChunk*)res;
     fc->markNotFree();
     assert(!fc->is_free(), "shouldn't be marked free");
     assert(oop(fc)->klass_or_null() == NULL, "should look uninitialized");
     // Verify that the block offset table shows this to
     // be a single block, but not one which is unallocated.
     _bt.verify_single_block(res, size);
     _bt.verify_not_unallocated(res, size);
     // mangle a just allocated object with a distinct pattern.
     debug_only(fc->mangleAllocated(size));
   }
 
+  // After allocation, recalculate used space and update used_stable
+  recalculate_used_stable();
+
   return res;
 }
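Two things happen at the end of the allocation path above: in debug builds, debug_only(fc->mangleAllocated(size)) fills the freshly allocated block with a distinct bit pattern so a read of uninitialized memory is conspicuous, and the added recalculate_used_stable() call refreshes the cached value under the same freelistLock() that allocate_adaptive_freelists() asserts below, so the snapshot also tracks mutator allocation rather than only the GC-end resize. A sketch of the mangling idea only (the sentinel value and signature are illustrative, not HotSpot's):

    #include <cstddef>
    #include <cstdint>

    const uintptr_t kBadHeapWord = 0xBAADBABE;  // illustrative sentinel

    // Overwrite a just-allocated block, skipping header_words so that
    // allocator metadata (e.g. the chunk header) stays intact.
    void mangle_allocated(uintptr_t* block, size_t size_in_words,
                          size_t header_words) {
      for (size_t i = header_words; i < size_in_words; i++) {
        block[i] = kBadHeapWord;
      }
    }

    #ifndef NDEBUG
    #define DEBUG_ONLY_SKETCH(expr) expr   // compiled into debug builds only,
    #else                                  // mirroring HotSpot's debug_only()
    #define DEBUG_ONLY_SKETCH(expr)
    #endif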
@@ -1379,18 +1392,18 @@
 
 HeapWord* CompactibleFreeListSpace::allocate_adaptive_freelists(size_t size) {
   assert_lock_strong(freelistLock());
   HeapWord* res = NULL;
   assert(size == adjustObjectSize(size),
          "use adjustObjectSize() before calling into allocate()");
 
   // Strategy
   //   if small
   //     exact size from small object indexed list if small
   //     small or large linear allocation block (linAB) as appropriate
   //     take from lists of greater sized chunks
   //   else
   //     dictionary
   //     small or large linear allocation block if it has the space
   // Try allocating exact size from indexTable first
   if (size < IndexSetSize) {
     res = (HeapWord*) getChunkFromIndexedFreeList(size);
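The Strategy comment lays out a two-tier design: small requests (size < IndexSetSize) first try an exact-size indexed free list, then the linear allocation blocks and larger lists, while everything else goes to the dictionary of big chunks. A compressed sketch of that dispatch, with std::map standing in for the dictionary and every name illustrative:

    #include <cstddef>
    #include <map>
    #include <vector>

    const size_t kIndexSetSize = 257;  // illustrative small-request cutoff, in words

    // Tier 1: exact-size lists, O(1) pop for small requests.
    static std::vector<void*> indexed_free_list[kIndexSetSize];
    // Tier 2: larger chunks keyed by word size; lower_bound ~ best fit.
    static std::map<size_t, std::vector<void*>> dictionary;

    void* allocate_sketch(size_t words) {
      if (words < kIndexSetSize) {
        auto& fl = indexed_free_list[words];
        if (!fl.empty()) {             // exact size from the indexed list
          void* c = fl.back();
          fl.pop_back();
          return c;
        }
        // Real code tries the linear allocation block next, then splits a
        // chunk from a larger indexed list, before falling through here.
      }
      auto it = dictionary.lower_bound(words);
      if (it == dictionary.end() || it->second.empty()) {
        return nullptr;                // (empty buckets are erased in real code)
      }
      void* c = it->second.back();
      it->second.pop_back();
      return c;  // real code would return a size-words prefix and put the
                 // remainder of the chunk back on a free list
    }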