HeapWord* CollectedHeap::allocate_from_tlab_slow(Klass* klass, Thread* thread, size_t size) {

  // Retain tlab and allocate object in shared space if
  // the amount free in the tlab is too large to discard.
  if (thread->tlab().free() > thread->tlab().refill_waste_limit()) {
    thread->tlab().record_slow_allocation(size);
    return NULL;
  }

  // Discard tlab and allocate a new one.
  // To minimize fragmentation, the last TLAB may be smaller than the rest.
  size_t new_tlab_size = thread->tlab().compute_size(size);

  thread->tlab().clear_before_allocation();

  if (new_tlab_size == 0) {
    return NULL;
  }

  // Allocate a new TLAB...
  HeapWord* obj = Universe::heap()->allocate_new_tlab(new_tlab_size);
  if (obj == NULL) {
    return NULL;
  }

  AllocTracer::send_allocation_in_new_tlab_event(klass, new_tlab_size * HeapWordSize, size * HeapWordSize);

  if (ZeroTLAB) {
    // ...and clear it.
    Copy::zero_to_words(obj, new_tlab_size);
  } else {
    // ...and zap just allocated object.
#ifdef ASSERT
    // Skip mangling the space corresponding to the object header to
    // ensure that the returned space is not considered parsable by
    // any concurrent GC thread.
    size_t hdr_size = oopDesc::header_size();
    Copy::fill_to_words(obj + hdr_size, new_tlab_size - hdr_size, badHeapWordVal);
#endif // ASSERT
  }
  thread->tlab().fill(obj, obj + size, new_tlab_size);
  return obj;
}
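
// For context, a minimal sketch of the fast path that falls back to the
// slow path above. This mirrors CollectedHeap::allocate_from_tlab from
// collectedHeap.inline.hpp; treat it as a from-memory reconstruction, not
// a verbatim quote.
HeapWord* CollectedHeap::allocate_from_tlab(Klass* klass, Thread* thread, size_t size) {
  assert(UseTLAB, "should use UseTLAB");

  // Fast path: bump-pointer allocation inside the thread's current TLAB.
  HeapWord* obj = thread->tlab().allocate(size);
  if (obj != NULL) {
    return obj;
  }
  // Slow path: retain or retire the TLAB as implemented above.
  return allocate_from_tlab_slow(klass, thread, size);
}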

// (Excerpt gap: the listing jumps ahead here. The lines below are the tail
// of CollectedHeap::new_store_pre_barrier; this branch performs an eager
// card mark, via the barrier set, over the MemRegion mr covering a newly
// allocated object when the mark is not deferred.)
      assert(bs->has_write_region_opt(), "No write_region() on BarrierSet");
      bs->write_region(mr);
    }
  }
  return new_obj;
}

size_t CollectedHeap::filler_array_hdr_size() {
  return size_t(align_object_offset(arrayOopDesc::header_size(T_INT))); // align to Long
}

size_t CollectedHeap::filler_array_min_size() {
  return align_object_size(filler_array_hdr_size()); // align to MinObjAlignment
}
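
// Hypothetical check of the two helpers above (illustration only: the
// figures assume a 64-bit VM with compressed class pointers, where an
// int-array header is 16 bytes, i.e. 2 HeapWords, and MinObjAlignment
// is 1 word):
//   filler_array_hdr_size()  // -> 2, header already Long-aligned
//   filler_array_min_size()  // -> 2, smallest heap-parsable filler array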

#ifdef ASSERT
void CollectedHeap::fill_args_check(HeapWord* start, size_t words)
{
  assert(words >= min_fill_size(), "too small to fill");
  assert(words % MinObjAlignment == 0, "unaligned size");
  assert(Universe::heap()->is_in_reserved(start), "not in heap");
  assert(Universe::heap()->is_in_reserved(start + words - 1), "not in heap");
}

void CollectedHeap::zap_filler_array(HeapWord* start, size_t words, bool zap)
{
  if (ZapFillerObjects && zap) {
    Copy::fill_to_words(start + filler_array_hdr_size(),
                        words - filler_array_hdr_size(), 0XDEAFBABE);
  }
}
#endif // ASSERT
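
// Usage note (an inference about intent, not quoted from the source): in a
// debug build, -XX:+ZapFillerObjects stamps the 0XDEAFBABE pattern over
// filler-array payloads, making stale filler memory easy to spot in a
// crash dump or heap inspection.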

void
CollectedHeap::fill_with_array(HeapWord* start, size_t words, bool zap)
{
  assert(words >= filler_array_min_size(), "too small for an array");
  assert(words <= filler_array_max_size(), "too big for a single object");

  const size_t payload_size = words - filler_array_hdr_size();
  const size_t len = payload_size * HeapWordSize / sizeof(jint);
  assert((int)len >= 0, "size too large " SIZE_FORMAT " becomes %d", words, (int)len);
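
  // Worked example of the length computation above (illustration only;
  // figures assume a 64-bit VM where the int-array header occupies
  // 2 HeapWords and HeapWordSize is 8):
  //   words        = 128 HeapWords of filler to cover
  //   payload_size = 128 - 2 = 126 HeapWords
  //   len          = 126 * 8 / sizeof(jint) = 252 elements
  // The resulting int[252] exactly tiles the 128-word gap, keeping the
  // heap parsable for heap walkers.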