    thread->check_for_valid_safepoint_state(true);
  }
}
#endif

HeapWord* CollectedHeap::handle_heap_sampling(Thread* thread, HeapWord* obj, size_t size) {
  // We can come here for three reasons:
  //  - We really did fill the TLAB.
  //  - We pretended to everyone that we did, because we want to sample.
  //  - Both of the above are true at the same time.
  if (HeapMonitoring::enabled()) {
    if (thread->tlab().should_sample()) {
      // If we don't have an object yet, try to allocate it.
      if (obj == NULL) {
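        // (Assumption, inferred from set_back_actual_end()/pick_next_sample():
        // a sampled TLAB is handed out with an artificially lowered end so
        // that an allocation traps into this slow path at the sample point.)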
        // The TLAB could still have space left after this sample.
        thread->tlab().set_back_actual_end();
        obj = thread->tlab().allocate(size);
      }

      // Is the object allocated now?
      // If not, we have to wait for a new TLAB; the subsequent call to
      // handle_heap_sampling will pick the next sample.
      if (obj != NULL) {
        // The object is allocated; sample it now.
        HeapMonitoring::object_alloc_do_sample(thread,
                                               reinterpret_cast<oopDesc*>(obj),
                                               size);
        // The allocation succeeded, so pick the next sample.
        thread->tlab().pick_next_sample();
      }
    }
  }

  return obj;
}

HeapWord* CollectedHeap::allocate_from_tlab_slow(Klass* klass, Thread* thread, size_t size) {
  HeapWord* obj = handle_heap_sampling(thread, NULL, size);

  if (obj != NULL) {
    return obj;
  }

  // Retain the TLAB and allocate the object in the shared space if
  // the amount free in the TLAB is too large to discard.
  if (thread->tlab().free() > thread->tlab().refill_waste_limit()) {
    thread->tlab().record_slow_allocation(size);
    return NULL;
  }
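  // Illustrative numbers (not taken from this code): with a 256 KiB TLAB and
  // HotSpot's default TLABRefillWasteFraction of 64, the initial waste limit
  // is 256 KiB / 64 = 4 KiB, so the TLAB above is retained whenever more than
  // 4 KiB of it is still free, and this allocation goes to the shared heap.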

  // Discard the TLAB and allocate a new one.
  // To minimize fragmentation, the last TLAB may be smaller than the rest.
  size_t new_tlab_size = thread->tlab().compute_size(size);
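  // (Assumption: compute_size() applies the TLAB sizing policy to the
  // requested size and returns 0 when even a new TLAB could not accommodate
  // this request, in which case the caller falls back to the shared heap.)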