#ifdef ASSERT
// Debug-only sanity check: verify that the current thread is in a state
// where asking the heap for memory is legal at all.
void CollectedHeap::check_for_valid_allocation_state() {
  Thread* current = Thread::current();
  // How to choose between a pending exception and a potential
  // OutOfMemoryError? Don't allow pending exceptions.
  // This is a VM policy failure, so how do we exhaustively test it?
  assert(!current->has_pending_exception(),
         "shouldn't be allocating with pending exception");
  if (StrictSafepointChecks) {
    // Allocation must not be blocked for this thread (e.g. by a scoped
    // no-allocation verifier somewhere up the stack).
    assert(current->allow_allocation(),
           "Allocation done by thread for which allocation is blocked "
           "by No_Allocation_Verifier!");
    // Allocation of an oop can always invoke a safepoint,
    // hence, the true argument
    current->check_for_valid_safepoint_state(true);
  }
}
#endif
357
// Slow path for TLAB allocation, taken when the inline bump-pointer
// allocation in the thread's current TLAB fails.
//   klass  - class of the object being allocated (reported to the alloc tracer)
//   thread - allocating thread that owns the TLAB
//   size   - requested object size in HeapWords (scaled by HeapWordSize below)
// Returns the address of the newly allocated object inside a fresh TLAB, or
// NULL when the caller should allocate directly in the shared space instead
// (current TLAB retained, or no new TLAB could be obtained).
358 HeapWord* CollectedHeap::allocate_from_tlab_slow(Klass* klass, Thread* thread, size_t size) {
359
360 // Retain tlab and allocate object in shared space if
361 // the amount free in the tlab is too large to discard.
362 if (thread->tlab().free() > thread->tlab().refill_waste_limit()) {
363 thread->tlab().record_slow_allocation(size);
364 return NULL;
365 }
366
367 // Discard tlab and allocate a new one.
368 // To minimize fragmentation, the last TLAB may be smaller than the rest.
369 size_t new_tlab_size = thread->tlab().compute_size(size);
370
371 thread->tlab().clear_before_allocation();
372
// A computed size of 0 means no new TLAB will be carved out for this
// request; the caller falls back to shared-space allocation.
373 if (new_tlab_size == 0) {
374 return NULL;
375 }
376
377 // Allocate a new TLAB...
378 HeapWord* obj = Universe::heap()->allocate_new_tlab(new_tlab_size);
379 if (obj == NULL) {
380 return NULL;
381 }
382
383 AllocTracer::send_allocation_in_new_tlab(klass, obj, new_tlab_size * HeapWordSize, size * HeapWordSize, thread);
384
385 if (ZeroTLAB) {
386 // ..and clear it.
387 Copy::zero_to_words(obj, new_tlab_size);
388 } else {
389 // ...and zap just allocated object.
390 #ifdef ASSERT
391 // Skip mangling the space corresponding to the object header to
392 // ensure that the returned space is not considered parsable by
393 // any concurrent GC thread.
394 size_t hdr_size = oopDesc::header_size();
395 Copy::fill_to_words(obj + hdr_size, new_tlab_size - hdr_size, badHeapWordVal);
396 #endif // ASSERT
397 }
// Install the new TLAB: [obj, obj+size) is the object just allocated, the
// remainder of [obj, obj+new_tlab_size) becomes the thread's new buffer.
398 thread->tlab().fill(obj, obj + size, new_tlab_size);
399 return obj;
400 }
401
402 size_t CollectedHeap::max_tlab_size() const {
403 // TLABs can't be bigger than we can fill with a int[Integer.MAX_VALUE].
404 // This restriction could be removed by enabling filling with multiple arrays.
405 // If we compute that the reasonable way as
406 // header_size + ((sizeof(jint) * max_jint) / HeapWordSize)
407 // we'll overflow on the multiply, so we do the divide first.
408 // We actually lose a little by dividing first,
409 // but that just makes the TLAB somewhat smaller than the biggest array,
410 // which is fine, since we'll be able to fill that.
411 size_t max_int_size = typeArrayOopDesc::header_size(T_INT) +
412 sizeof(jint) *
413 ((juint) max_jint / (size_t) HeapWordSize);
414 return align_down(max_int_size, MinObjAlignment);
415 }
416
417 size_t CollectedHeap::filler_array_hdr_size() {
|
// Debug-only (ASSERT builds) sanity check: verifies the current thread is in
// a state where heap allocation is permitted before an allocation proceeds.
339 #ifdef ASSERT
340 void CollectedHeap::check_for_valid_allocation_state() {
341 Thread *thread = Thread::current();
342 // How to choose between a pending exception and a potential
343 // OutOfMemoryError? Don't allow pending exceptions.
344 // This is a VM policy failure, so how do we exhaustively test it?
345 assert(!thread->has_pending_exception(),
346 "shouldn't be allocating with pending exception");
// NOTE(review): StrictSafepointChecks appears to gate the more expensive
// checks — presumably a develop/debug flag; confirm against globals.hpp.
347 if (StrictSafepointChecks) {
348 assert(thread->allow_allocation(),
349 "Allocation done by thread for which allocation is blocked "
350 "by No_Allocation_Verifier!");
351 // Allocation of an oop can always invoke a safepoint,
352 // hence, the true argument
353 thread->check_for_valid_safepoint_state(true);
354 }
355 }
356 #endif
357
// Slow path for TLAB allocation with heap-sampling support: first gives the
// thread's heap sampler a chance to satisfy the request from the current
// TLAB, then either retains the TLAB (returning NULL so the caller allocates
// in the shared space) or retires it and installs a fresh TLAB containing
// the new object.
//   klass  - class of the object being allocated (reported to the alloc tracer)
//   thread - allocating thread that owns the TLAB and the heap sampler
//   size   - requested object size in HeapWords (scaled by HeapWordSize below)
// Returns the new object's address, or NULL when the caller should allocate
// directly in the shared space (or no new TLAB could be obtained).
358 HeapWord* CollectedHeap::allocate_from_tlab_slow(Klass* klass, Thread* thread, size_t size) {
359 HeapWord* obj = NULL;
360
361 // In assertion mode, check that there was a sampling collector present
362 // in the stack. This enforces checking that no path is without a sampling
363 // collector.
364 assert(thread->heap_sampler().sampling_collector_present(),
365 "Sampling collector not present.");
366
367 if (ThreadHeapSampler::enabled()) {
368 // Try to allocate the sampled object from TLAB, it is possible a sample
369 // point was put and the TLAB still has space.
370 obj = thread->tlab().allocate_sampled_object(size);
371
372 if (obj != NULL) {
373 return obj;
374 }
375 }
376
377 // Retain tlab and allocate object in shared space if
378 // the amount free in the tlab is too large to discard.
379 if (thread->tlab().free() > thread->tlab().refill_waste_limit()) {
380 thread->tlab().record_slow_allocation(size);
381 return NULL;
382 }
383
384 // Discard tlab and allocate a new one.
385 // To minimize fragmentation, the last TLAB may be smaller than the rest.
386 size_t new_tlab_size = thread->tlab().compute_size(size);
387
388 thread->tlab().clear_before_allocation();
389
// A computed size of 0 means no new TLAB will be carved out for this
// request; the caller falls back to shared-space allocation.
390 if (new_tlab_size == 0) {
391 return NULL;
392 }
393
394 // Allocate a new TLAB...
395 obj = Universe::heap()->allocate_new_tlab(new_tlab_size);
396 if (obj == NULL) {
397 return NULL;
398 }
399
400 AllocTracer::send_allocation_in_new_tlab(klass, obj, new_tlab_size * HeapWordSize, size * HeapWordSize, thread);
401
402 if (ZeroTLAB) {
403 // ..and clear it.
404 Copy::zero_to_words(obj, new_tlab_size);
405 } else {
406 // ...and zap just allocated object.
407 #ifdef ASSERT
408 // Skip mangling the space corresponding to the object header to
409 // ensure that the returned space is not considered parsable by
410 // any concurrent GC thread.
411 size_t hdr_size = oopDesc::header_size();
412 Copy::fill_to_words(obj + hdr_size, new_tlab_size - hdr_size, badHeapWordVal);
413 #endif // ASSERT
414 }
415
416 // Send the thread information about this allocation in case a sample is
417 // requested.
// NOTE(review): check_for_sampling runs before tlab().fill() installs the
// new buffer — presumably it only records the sampling decision against
// bytes_since_last_sample_point; confirm it does not rely on the TLAB
// already pointing at obj.
418 if (ThreadHeapSampler::enabled()) {
419 size_t tlab_bytes_since_last_sample = thread->tlab().bytes_since_last_sample_point();
420 thread->heap_sampler().check_for_sampling(obj, size, tlab_bytes_since_last_sample);
421 }
422
// Install the new TLAB: [obj, obj+size) is the object just allocated, the
// remainder of [obj, obj+new_tlab_size) becomes the thread's new buffer.
423 thread->tlab().fill(obj, obj + size, new_tlab_size);
424 return obj;
425 }
426
// Upper bound for the size of any TLAB, in HeapWords.
427 size_t CollectedHeap::max_tlab_size() const {
428 // TLABs can't be bigger than we can fill with a int[Integer.MAX_VALUE].
429 // This restriction could be removed by enabling filling with multiple arrays.
430 // If we compute that the reasonable way as
431 // header_size + ((sizeof(jint) * max_jint) / HeapWordSize)
432 // we'll overflow on the multiply, so we do the divide first.
433 // We actually lose a little by dividing first,
434 // but that just makes the TLAB somewhat smaller than the biggest array,
435 // which is fine, since we'll be able to fill that.
436 size_t max_int_size = typeArrayOopDesc::header_size(T_INT) +
437 sizeof(jint) *
438 ((juint) max_jint / (size_t) HeapWordSize);
// Round down so the bound is a multiple of the minimum object alignment.
439 return align_down(max_int_size, MinObjAlignment);
440 }
441
442 size_t CollectedHeap::filler_array_hdr_size() {
|