370 // Retain tlab and allocate object in shared space if
371 // the amount free in the tlab is too large to discard.
372 if (thread->tlab().free() > thread->tlab().refill_waste_limit()) { // keep the current TLAB; caller falls back to a shared-space allocation
373 thread->tlab().record_slow_allocation(size); // bookkeeping for adaptive waste limit / slow-allocation stats
374 return NULL; // NULL signals the caller to allocate outside the TLAB
375 }
376
377 // Discard tlab and allocate a new one.
378 // To minimize fragmentation, the last TLAB may be smaller than the rest.
379 size_t new_tlab_size = thread->tlab().compute_size(size); // desired word size for the replacement TLAB; 0 means no viable size
380
381 thread->tlab().clear_before_allocation(); // retire the old TLAB (fills its unused tail so the heap stays parsable)
382
383 if (new_tlab_size == 0) {
384 return NULL; // no suitable TLAB size; allocate this object in shared space instead
385 }
386
387 // Allocate a new TLAB requesting new_tlab_size. Any size
388 // between minimal and new_tlab_size is accepted.
389 size_t actual_tlab_size = 0; // out-parameter: heap reports the size it actually granted
390 size_t minimal_tlab_size = MAX2(ThreadLocalAllocBuffer::compute_min_size(size), MinTLABSize); // smallest acceptable TLAB: must fit the object and respect the MinTLABSize floor
391 HeapWord* obj = Universe::heap()->allocate_new_tlab(minimal_tlab_size, new_tlab_size, &actual_tlab_size);
392 if (obj == NULL) {
393 assert(actual_tlab_size == 0, "Allocation failed, but actual size was updated. min: " SIZE_FORMAT ", desired: " SIZE_FORMAT ", actual: " SIZE_FORMAT,
394 minimal_tlab_size, new_tlab_size, actual_tlab_size);
395 return NULL;
396 }
397 assert(actual_tlab_size != 0, "Allocation succeeded but actual size not updated. obj at: " PTR_FORMAT " min: " SIZE_FORMAT ", desired: " SIZE_FORMAT,
398 p2i(obj), minimal_tlab_size, new_tlab_size);
399
400 AllocTracer::send_allocation_in_new_tlab(klass, obj, actual_tlab_size * HeapWordSize, size * HeapWordSize, thread); // JFR event; sizes converted from words to bytes
401
402 if (ZeroTLAB) {
403 // ..and clear it.
404 Copy::zero_to_words(obj, actual_tlab_size);
405 } else {
406 // ...and zap just allocated object.
407 #ifdef ASSERT
408 // Skip mangling the space corresponding to the object header to
409 // ensure that the returned space is not considered parsable by
410 // any concurrent GC thread.
411 size_t hdr_size = oopDesc::header_size();
412 Copy::fill_to_words(obj + hdr_size, actual_tlab_size - hdr_size, badHeapWordVal); // debug-only mangle of everything past the header
413 #endif // ASSERT
414 }
415 thread->tlab().fill(obj, obj + size, actual_tlab_size); // install the new TLAB; first 'size' words become the returned object
416 return obj;
417 }
418
496 void CollectedHeap::fill_with_objects(HeapWord* start, size_t words, bool zap)
497 {
498 DEBUG_ONLY(fill_args_check(start, words);) // debug-only sanity check of the requested range
499 HandleMark hm; // Free handles before leaving.
500
501 // Multiple objects may be required depending on the filler array maximum size. Fill
502 // the range up to that with objects that are filler_array_max_size sized. The
503 // remainder is filled with a single object.
504 const size_t min = min_fill_size();
505 const size_t max = filler_array_max_size();
506 while (words > max) {
507 const size_t cur = (words - max) >= min ? max : max - min; // shrink this chunk when a full 'max' would leave a remainder smaller than 'min'
508 fill_with_array(start, cur, zap);
509 start += cur;
510 words -= cur;
511 }
512
513 fill_with_object_impl(start, words, zap); // final piece (<= max words) filled with a single object
514 }
515
516 HeapWord* CollectedHeap::allocate_new_tlab(size_t min_word_size,
517 size_t desired_word_size,
518 size_t* actual_word_size) {
519 guarantee(false, "thread-local allocation buffers not supported"); // default implementation: heaps that support TLABs must override this
520 return NULL; // unreachable after the guarantee; satisfies the return type
521 }
522
523 void CollectedHeap::ensure_parsability(bool retire_tlabs) {
524 // The second disjunct in the assertion below makes a concession
525 // for the start-up verification done while the VM is being
526 // created. Callers be careful that you know that mutators
527 // aren't going to interfere -- for instance, this is permissible
528 // if we are still single-threaded and have either not yet
529 // started allocating (nothing much to verify) or we have
530 // started allocating but are now a full-fledged JavaThread
531 // (and have thus made our TLAB's) available for filling.
532 assert(SafepointSynchronize::is_at_safepoint() ||
533 !is_init_completed(),
534 "Should only be called at a safepoint or at start-up"
535 " otherwise concurrent mutator activity may make heap "
536 " unparsable again");
537 const bool use_tlab = UseTLAB; // cache the flag once up front (used by the rest of the function, outside this excerpt)
538 // The main thread starts allocating via a TLAB even before it
|
370 // Retain tlab and allocate object in shared space if
371 // the amount free in the tlab is too large to discard.
372 if (thread->tlab().free() > thread->tlab().refill_waste_limit()) { // keep the current TLAB; caller falls back to a shared-space allocation
373 thread->tlab().record_slow_allocation(size); // bookkeeping for adaptive waste limit / slow-allocation stats
374 return NULL; // NULL signals the caller to allocate outside the TLAB
375 }
376
377 // Discard tlab and allocate a new one.
378 // To minimize fragmentation, the last TLAB may be smaller than the rest.
379 size_t new_tlab_size = thread->tlab().compute_size(size); // desired word size for the replacement TLAB; 0 means no viable size
380
381 thread->tlab().clear_before_allocation(); // retire the old TLAB (fills its unused tail so the heap stays parsable)
382
383 if (new_tlab_size == 0) {
384 return NULL; // no suitable TLAB size; allocate this object in shared space instead
385 }
386
387 // Allocate a new TLAB requesting new_tlab_size. Any size
388 // between minimal and new_tlab_size is accepted.
389 size_t actual_tlab_size = 0; // out-parameter: heap reports the size it actually granted
390 size_t min_tlab_size = ThreadLocalAllocBuffer::compute_min_size(size); // NOTE(review): unlike the other copy of this code in this file, no MAX2(..., MinTLABSize) clamp here -- confirm compute_min_size() already enforces the MinTLABSize floor
391 HeapWord* obj = Universe::heap()->allocate_new_tlab(min_tlab_size, new_tlab_size, &actual_tlab_size);
392 if (obj == NULL) {
393 assert(actual_tlab_size == 0, "Allocation failed, but actual size was updated. min: " SIZE_FORMAT ", desired: " SIZE_FORMAT ", actual: " SIZE_FORMAT,
394 min_tlab_size, new_tlab_size, actual_tlab_size);
395 return NULL;
396 }
397 assert(actual_tlab_size != 0, "Allocation succeeded but actual size not updated. obj at: " PTR_FORMAT " min: " SIZE_FORMAT ", desired: " SIZE_FORMAT,
398 p2i(obj), min_tlab_size, new_tlab_size);
399
400 AllocTracer::send_allocation_in_new_tlab(klass, obj, actual_tlab_size * HeapWordSize, size * HeapWordSize, thread); // JFR event; sizes converted from words to bytes
401
402 if (ZeroTLAB) {
403 // ..and clear it.
404 Copy::zero_to_words(obj, actual_tlab_size);
405 } else {
406 // ...and zap just allocated object.
407 #ifdef ASSERT
408 // Skip mangling the space corresponding to the object header to
409 // ensure that the returned space is not considered parsable by
410 // any concurrent GC thread.
411 size_t hdr_size = oopDesc::header_size();
412 Copy::fill_to_words(obj + hdr_size, actual_tlab_size - hdr_size, badHeapWordVal); // debug-only mangle of everything past the header
413 #endif // ASSERT
414 }
415 thread->tlab().fill(obj, obj + size, actual_tlab_size); // install the new TLAB; first 'size' words become the returned object
416 return obj;
417 }
418
496 void CollectedHeap::fill_with_objects(HeapWord* start, size_t words, bool zap)
497 {
498 DEBUG_ONLY(fill_args_check(start, words);) // debug-only sanity check of the requested range
499 HandleMark hm; // Free handles before leaving.
500
501 // Multiple objects may be required depending on the filler array maximum size. Fill
502 // the range up to that with objects that are filler_array_max_size sized. The
503 // remainder is filled with a single object.
504 const size_t min = min_fill_size();
505 const size_t max = filler_array_max_size();
506 while (words > max) {
507 const size_t cur = (words - max) >= min ? max : max - min; // shrink this chunk when a full 'max' would leave a remainder smaller than 'min'
508 fill_with_array(start, cur, zap);
509 start += cur;
510 words -= cur;
511 }
512
513 fill_with_object_impl(start, words, zap); // final piece (<= max words) filled with a single object
514 }
515
516 HeapWord* CollectedHeap::allocate_new_tlab(size_t min_size,
517 size_t requested_size,
518 size_t* actual_size) {
519 guarantee(false, "thread-local allocation buffers not supported"); // default implementation: heaps that support TLABs must override this
520 return NULL; // unreachable after the guarantee; satisfies the return type
521 }
522
523 void CollectedHeap::ensure_parsability(bool retire_tlabs) {
524 // The second disjunct in the assertion below makes a concession
525 // for the start-up verification done while the VM is being
526 // created. Callers be careful that you know that mutators
527 // aren't going to interfere -- for instance, this is permissible
528 // if we are still single-threaded and have either not yet
529 // started allocating (nothing much to verify) or we have
530 // started allocating but are now a full-fledged JavaThread
531 // (and have thus made our TLAB's) available for filling.
532 assert(SafepointSynchronize::is_at_safepoint() ||
533 !is_init_completed(),
534 "Should only be called at a safepoint or at start-up"
535 " otherwise concurrent mutator activity may make heap "
536 " unparsable again");
537 const bool use_tlab = UseTLAB; // cache the flag once up front (used by the rest of the function, outside this excerpt)
538 // The main thread starts allocating via a TLAB even before it
|