  assert(!thread->has_pending_exception(),
         "shouldn't be allocating with pending exception");
  if (StrictSafepointChecks) {
    assert(thread->allow_allocation(),
           "Allocation done by thread for which allocation is blocked "
           "by No_Allocation_Verifier!");
    // Allocation of an oop can always invoke a safepoint,
    // hence, the true argument
    thread->check_for_valid_safepoint_state(true);
  }
}
#endif

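// Handle a potential heap sample for an allocation of 'size' words. If heap
// monitoring is enabled and this TLAB is due for a sample, restore the TLAB's
// actual end, allocate the object here if the caller has not already done so,
// and report the allocation to HeapMonitoring. Returns the object, or NULL if
// it could not be allocated from the TLAB.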
HeapWord* CollectedHeap::handle_heap_sampling(Thread* thread, HeapWord* obj, size_t size) {
  // We can come here for three reasons:
  // - We really did fill the TLAB.
  // - We pretended the TLAB was full because we want to take a sample.
  // - Both of the above are true at the same time.
  if (HeapMonitoring::enabled()) {
    if (thread->tlab().should_sample()) {
      HeapWord* end = thread->tlab().end();
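      // Remember the sampled end before it is restored; how far top() ends up
      // past it is passed to pick_next_sample() below.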
      thread->tlab().set_back_actual_end();

      // If we don't have an object yet, try to allocate it.
      if (obj == NULL) {
        // The TLAB could still have space after this sample.
        obj = thread->tlab().allocate(size);
      }

      // Is the object allocated now? If not, we have to wait for a new TLAB;
      // the subsequent call to handle_heap_sampling will pick the next sample.
      if (obj != NULL) {
        // The object is allocated, sample it now.
        HeapMonitoring::object_alloc_do_sample(thread,
                                               reinterpret_cast<oopDesc*>(obj),
                                               size * HeapWordSize);
        // Pick the next sample point, since this allocation succeeded.
        thread->tlab().pick_next_sample(thread->tlab().top() - end);
      }
    }
  }

  return obj;
}

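// Slow-path TLAB allocation, reached when the inline fast path cannot satisfy
// the request: either the TLAB is genuinely out of space, or its end was
// lowered so that a heap sample could be taken. Returns NULL when the caller
// should allocate the object in the shared heap instead.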
HeapWord* CollectedHeap::allocate_from_tlab_slow(Klass* klass, Thread* thread, size_t size) {
  HeapWord* obj = handle_heap_sampling(thread, NULL, size);
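  // Remember whether this allocation should be sampled before the TLAB is
  // discarded and refilled below.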
  bool should_sample = thread->tlab().should_sample();

  if (obj != NULL) {
    return obj;
  }

  // Retain tlab and allocate object in shared space if
  // the amount free in the tlab is too large to discard.
  if (thread->tlab().free() > thread->tlab().refill_waste_limit()) {
    thread->tlab().record_slow_allocation(size);
    return NULL;
  }

  // Discard tlab and allocate a new one.
  // To minimize fragmentation, the last TLAB may be smaller than the rest.
  size_t new_tlab_size = thread->tlab().compute_size(size);

  thread->tlab().clear_before_allocation();

  if (new_tlab_size == 0) {
    return NULL;
  }

  // Allocate a new TLAB...
  obj = Universe::heap()->allocate_new_tlab(new_tlab_size);
  if (obj == NULL) {
    return NULL;
  }

  AllocTracer::send_allocation_in_new_tlab_event(klass, new_tlab_size * HeapWordSize, size * HeapWordSize);

  if (ZeroTLAB) {
    // ..and clear it.
    Copy::zero_to_words(obj, new_tlab_size);
  } else {
    // ...and zap just allocated object.
#ifdef ASSERT
    // Skip mangling the space corresponding to the object header to
    // ensure that the returned space is not considered parsable by
    // any concurrent GC thread.
    size_t hdr_size = oopDesc::header_size();
    Copy::fill_to_words(obj + hdr_size, new_tlab_size - hdr_size, badHeapWordVal);
#endif // ASSERT
  }
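  // Install the new TLAB with the current request already carved out of its
  // start: [obj, obj + size) is the object, the rest is available for
  // subsequent fast-path allocations.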
  thread->tlab().fill(obj, obj + size, new_tlab_size);

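  // If a sample was pending, report it now that the object has been allocated
  // in the fresh TLAB; handle_heap_sampling also picks the next sample point.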
  if (should_sample) {
    return handle_heap_sampling(thread, obj, size);
  } else {
    return obj;
  }
}

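// Flush a card mark that was deferred for a newly allocated object: dirty the
// cards covering the object via the barrier set, then clear the thread's
// deferred_card_mark field.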
void CollectedHeap::flush_deferred_store_barrier(JavaThread* thread) {
  MemRegion deferred = thread->deferred_card_mark();
  if (!deferred.is_empty()) {
    assert(_defer_initial_card_mark, "Otherwise should be empty");
    {
      // Verify that the storage points to a parsable object in heap
      DEBUG_ONLY(oop old_obj = oop(deferred.start());)
      assert(is_in(old_obj), "Not in allocated heap");
      assert(!can_elide_initializing_store_barrier(old_obj),
             "Else should have been filtered in new_store_pre_barrier()");
      assert(oopDesc::is_oop(old_obj, true), "Not an oop");
      assert(deferred.word_size() == (size_t)(old_obj->size()),
             "Mismatch: multiple objects?");
    }
    BarrierSet* bs = barrier_set();
    assert(bs->has_write_region_opt(), "No write_region() on BarrierSet");
    bs->write_region(deferred);
    // "Clear" the deferred_card_mark field