285 "by No_Allocation_Verifier!");
286 // Allocation of an oop can always invoke a safepoint,
287 // hence, the true argument
288 thread->check_for_valid_safepoint_state(true);
289 }
290 }
291 #endif
292
293
294 void CollectedHeap::sample_allocation(Thread* thread, HeapWord* obj,
295 size_t size, size_t overflowed_words) {
296 // Object is allocated, sample it now.
297 HeapMonitoring::object_alloc_do_sample(thread,
298 reinterpret_cast<oopDesc*>(obj),
299 size * HeapWordSize);
300 // Pick a next sample in this case, we allocated right.
301 thread->tlab().pick_next_sample(overflowed_words);
302 }
303
304 HeapWord* CollectedHeap::allocate_sampled_object(Thread* thread, size_t size) {
305 thread->tlab().set_back_actual_end();
306
307 // The tlab could still have space after this sample.
308 return thread->tlab().allocate(size);
309 }
310
// Slow path taken when the fast bump-pointer TLAB allocation failed.
// Visible portion: (1) if heap sampling wants this allocation, retry inside
// the current TLAB and sample on success; (2) otherwise decide whether to
// retain the current TLAB (returning NULL directs the caller to allocate the
// object in the shared heap instead). The rest of the function (TLAB refill)
// continues past this excerpt. NOTE(review): `klass` is unused in the visible
// portion — presumably consumed by the continuation; confirm.
HeapWord* CollectedHeap::allocate_from_tlab_slow(Klass* klass, Thread* thread, size_t size) {
  // In case the tlab changes, remember if this one wanted a sample.
  bool should_sample = HeapMonitoring::enabled() && thread->tlab().should_sample();

  HeapWord* obj = NULL;
  if (should_sample) {
    // Remember the tlab end to fix up the sampling rate.
    HeapWord* tlab_old_end = thread->tlab().end();
    obj = allocate_sampled_object(thread, size);

    // If we did allocate in this tlab, sample it. Otherwise, we wait for the
    // new tlab's first allocation at the end of this method.
    if (obj != NULL) {
      // Fix sample rate by removing the extra words allocated in this last
      // sample: top moved from tlab_old_end by the sampled allocation.
      size_t overflowed_words = pointer_delta(thread->tlab().top(), tlab_old_end);
      sample_allocation(thread, obj, size, overflowed_words);
      return obj;
    }
  }

  // Retain tlab and allocate object in shared space if
  // the amount free in the tlab is too large to discard.
  if (thread->tlab().free() > thread->tlab().refill_waste_limit()) {
    thread->tlab().record_slow_allocation(size);
    return NULL;
  }
338
|
285 "by No_Allocation_Verifier!");
286 // Allocation of an oop can always invoke a safepoint,
287 // hence, the true argument
288 thread->check_for_valid_safepoint_state(true);
289 }
290 }
291 #endif
292
293
294 void CollectedHeap::sample_allocation(Thread* thread, HeapWord* obj,
295 size_t size, size_t overflowed_words) {
296 // Object is allocated, sample it now.
297 HeapMonitoring::object_alloc_do_sample(thread,
298 reinterpret_cast<oopDesc*>(obj),
299 size * HeapWordSize);
300 // Pick a next sample in this case, we allocated right.
301 thread->tlab().pick_next_sample(overflowed_words);
302 }
303
304 HeapWord* CollectedHeap::allocate_sampled_object(Thread* thread, size_t size) {
305 thread->tlab().set_back_allocation_end();
306
307 // The tlab could still have space after this sample.
308 return thread->tlab().allocate(size);
309 }
310
// Slow path taken when the fast bump-pointer TLAB allocation failed.
// Visible portion: (1) if heap sampling wants this allocation, retry inside
// the current TLAB and sample on success; (2) otherwise decide whether to
// retain the current TLAB (returning NULL directs the caller to allocate the
// object in the shared heap instead). The rest of the function (TLAB refill)
// continues past this excerpt. NOTE(review): `klass` is unused in the visible
// portion — presumably consumed by the continuation; confirm.
HeapWord* CollectedHeap::allocate_from_tlab_slow(Klass* klass, Thread* thread, size_t size) {
  // In case the tlab changes, remember if this one wanted a sample.
  bool should_sample = HeapMonitoring::enabled() && thread->tlab().should_sample();

  HeapWord* obj = NULL;
  if (should_sample) {
    // Remember the tlab end to fix up the sampling rate.
    HeapWord* tlab_old_end = thread->tlab().current_end();
    obj = allocate_sampled_object(thread, size);

    // If we did allocate in this tlab, sample it. Otherwise, we wait for the
    // new tlab's first allocation at the end of this method.
    if (obj != NULL) {
      // Fix sample rate by removing the extra words allocated in this last
      // sample: top moved from tlab_old_end by the sampled allocation.
      size_t overflowed_words = pointer_delta(thread->tlab().top(), tlab_old_end);
      sample_allocation(thread, obj, size, overflowed_words);
      return obj;
    }
  }

  // Retain tlab and allocate object in shared space if
  // the amount free in the tlab is too large to discard.
  if (thread->tlab().free() > thread->tlab().refill_waste_limit()) {
    thread->tlab().record_slow_allocation(size);
    return NULL;
  }
338
|