217 // No GCLABs in this thread, fallback to shared allocation
218 return NULL;
219 }
220 HeapWord* obj = gclab->allocate(size);
221 if (obj != NULL) {
222 return obj;
223 }
224 // Otherwise...
225 return allocate_from_gclab_slow(thread, size);
226 }
227
// Evacuate (copy) object p into to-space and return the canonical copy.
//
// The protocol is lock-free: each evacuating thread speculatively allocates
// space and copies the object, then tries to CAS-install the forwarding
// pointer. Exactly one thread wins the race; losers roll back (GCLAB) or
// overwrite (shared allocation) their speculative copy and adopt the winner's.
inline oop ShenandoahHeap::evacuate_object(oop p, Thread* thread) {
  if (ShenandoahThreadLocalData::is_oom_during_evac(Thread::current())) {
    // This thread went through the OOM during evac protocol and it is safe to return
    // the forward pointer. It must not attempt to evacuate any more.
    return ShenandoahBarrierSet::resolve_forwarded(p);
  }

  assert(ShenandoahThreadLocalData::is_evac_allowed(thread), "must be enclosed in oom-evac scope");

  // Copies carry an extra forwarding-pointer word in front of the object
  // proper, so the allocation is one fwdptr word larger than the object.
  size_t size_no_fwdptr = (size_t) p->size();
  size_t size_with_fwdptr = size_no_fwdptr + ShenandoahForwarding::word_size();

  assert(!heap_region_containing(p)->is_humongous(), "never evacuate humongous objects");

  bool alloc_from_gclab = true;
  HeapWord* filler = NULL;

#ifdef ASSERT
  if (ShenandoahOOMDuringEvacALot &&
      (os::random() & 1) == 0) { // Simulate OOM every ~2nd slow-path call
    filler = NULL;
  } else {
#endif
    if (UseTLAB) {
      filler = allocate_from_gclab(thread, size_with_fwdptr);
    }
    if (filler == NULL) {
      // GCLAB exhausted or disabled: fall back to a shared GC allocation.
      ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared_gc(size_with_fwdptr);
      filler = allocate_memory(req);
      alloc_from_gclab = false;
    }
#ifdef ASSERT
  }
#endif

  if (filler == NULL) {
    // Total allocation failure: engage the OOM-during-evac protocol, then
    // return whatever copy (possibly the original) is currently published.
    control_thread()->handle_alloc_failure_evac(size_with_fwdptr);

    _oom_evac_handler.handle_out_of_memory_during_evacuation();

    return ShenandoahBarrierSet::resolve_forwarded(p);
  }

  // Copy the object and initialize its forwarding ptr:
  HeapWord* copy = filler + ShenandoahForwarding::word_size();
  oop copy_val = oop(copy);

  Copy::aligned_disjoint_words((HeapWord*) p, copy, size_no_fwdptr);
  ShenandoahForwarding::initialize(oop(copy));

  // Try to install the new forwarding pointer.
  oop result = ShenandoahForwarding::try_update_forwardee(p, copy_val);

  if (oopDesc::equals_raw(result, p)) {
    // Successfully evacuated. Our copy is now the public one!
    shenandoah_assert_correct(NULL, copy_val);
    return copy_val;
  } else {
    // Failed to evacuate. We need to deal with the object that is left behind. Since this
    // new allocation is certainly after TAMS, it will be considered live in the next cycle.
    // But if it happens to contain references to evacuated regions, those references would
    // not get updated for this stale copy during this cycle, and we will crash while scanning
    // it the next cycle.
    //
    // For GCLAB allocations, it is enough to rollback the allocation ptr. Either the next
    // object will overwrite this stale copy, or the filler object on LAB retirement will
    // do this. For non-GCLAB allocations, we have no way to retract the allocation, and
    // have to explicitly overwrite the copy with the filler object. With that overwrite,
    // we have to keep the fwdptr initialized and pointing to our (stale) copy.
    if (alloc_from_gclab) {
      ShenandoahThreadLocalData::gclab(thread)->undo_allocation(filler, size_with_fwdptr);
    } else {
      fill_with_object(copy, size_no_fwdptr);
    }
    shenandoah_assert_correct(NULL, copy_val);
    shenandoah_assert_correct(NULL, result);
    return result;
  }
}
307
308 template<bool RESOLVE>
309 inline bool ShenandoahHeap::requires_marking(const void* entry) const {
310 oop obj = oop(entry);
311 if (RESOLVE) {
312 obj = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
313 }
314 return !_marking_context->is_marked(obj);
315 }
316
317 template <class T>
318 inline bool ShenandoahHeap::in_collection_set(T p) const {
319 HeapWord* obj = (HeapWord*) p;
320 assert(collection_set() != NULL, "Sanity");
321 assert(is_in(obj), "should be in heap");
322
353
// Returns true while a full (stop-the-world) GC cycle is in progress.
inline bool ShenandoahHeap::is_full_gc_in_progress() const {
  return _full_gc_in_progress.is_set();
}
357
// Returns true while the move (compaction) phase of a full GC is in progress.
inline bool ShenandoahHeap::is_full_gc_move_in_progress() const {
  return _full_gc_move_in_progress.is_set();
}
361
// Returns true while the update-references GC state flag is set.
inline bool ShenandoahHeap::is_update_refs_in_progress() const {
  return _gc_state.is_set(UPDATEREFS);
}
365
// Convenience overload: iterate marked objects in the whole used part of the
// region, i.e. up to the region's current top.
template<class T>
inline void ShenandoahHeap::marked_object_iterate(ShenandoahHeapRegion* region, T* cl) {
  marked_object_iterate(region, cl, region->top());
}
370
// Visit every live (marked) object in the region, in address order, up to limit.
//
// Below TAMS (top-at-mark-start) the mark bitmap is authoritative and drives
// the scan; at/above TAMS every object is implicitly live, so the walk
// proceeds by object size instead.
template<class T>
inline void ShenandoahHeap::marked_object_iterate(ShenandoahHeapRegion* region, T* cl, HeapWord* limit) {
  assert(ShenandoahForwarding::word_offset() < 0, "skip_delta calculation below assumes the forwarding ptr is before obj");
  assert(! region->is_humongous_continuation(), "no humongous continuation regions here");

  ShenandoahMarkingContext* const ctx = complete_marking_context();
  assert(ctx->is_complete(), "sanity");

  MarkBitMap* mark_bit_map = ctx->mark_bit_map();
  HeapWord* tams = ctx->top_at_mark_start(region);

  // Each object is preceded by a forwarding-pointer word, so stepping past an
  // object must skip that extra word in addition to the object itself.
  size_t skip_bitmap_delta = ShenandoahForwarding::word_size() + 1;
  size_t skip_objsize_delta = ShenandoahForwarding::word_size() /* + actual obj.size() below */;
  HeapWord* start = region->bottom() + ShenandoahForwarding::word_size();
  HeapWord* end = MIN2(tams + ShenandoahForwarding::word_size(), region->end());

  // Step 1. Scan below the TAMS based on bitmap data.
  HeapWord* limit_bitmap = MIN2(limit, tams);

  // Try to scan the initial candidate. If the candidate is above the TAMS, it would
  // fail the subsequent "< limit_bitmap" checks, and fall through to Step 2.
  HeapWord* cb = mark_bit_map->get_next_marked_addr(start, end);

  intx dist = ShenandoahMarkScanPrefetch;
  if (dist > 0) {
    // Batched scan that prefetches the oop data, anticipating the access to
    // either header, oop field, or forwarding pointer. Note that we cannot
    // touch anything in oop, while it still being prefetched to get enough
    // time for prefetch to work. This is why we try to scan the bitmap linearly,
    // disregarding the object size. However, since we know forwarding pointer
    // precedes the object, we can skip over it. Once we cannot trust the bitmap,
    // there is no point for prefetching the oop contents, as oop->size() will
    // touch it prematurely.

    // No variable-length arrays in standard C++, have enough slots to fit
    // the prefetch distance.
    static const int SLOT_COUNT = 256;
    guarantee(dist <= SLOT_COUNT, "adjust slot count");
    HeapWord* slots[SLOT_COUNT];

    int avail;
    do {
      avail = 0;
      // Phase A: gather up to 'dist' marked addresses, issuing prefetches.
      for (int c = 0; (c < dist) && (cb < limit_bitmap); c++) {
        Prefetch::read(cb, ShenandoahForwarding::byte_offset());
        slots[avail++] = cb;
        cb += skip_bitmap_delta;
        if (cb < limit_bitmap) {
          cb = mark_bit_map->get_next_marked_addr(cb, limit_bitmap);
        }
      }

      // Phase B: visit the gathered objects, now (hopefully) in cache.
      for (int c = 0; c < avail; c++) {
        assert (slots[c] < tams, "only objects below TAMS here: " PTR_FORMAT " (" PTR_FORMAT ")", p2i(slots[c]), p2i(tams));
        assert (slots[c] < limit, "only objects below limit here: " PTR_FORMAT " (" PTR_FORMAT ")", p2i(slots[c]), p2i(limit));
        oop obj = oop(slots[c]);
        assert(oopDesc::is_oop(obj), "sanity");
        assert(ctx->is_marked(obj), "object expected to be marked");
        cl->do_object(obj);
      }
    } while (avail > 0);
  } else {
    // Prefetching disabled: straightforward one-at-a-time bitmap scan.
    while (cb < limit_bitmap) {
      assert (cb < tams, "only objects below TAMS here: " PTR_FORMAT " (" PTR_FORMAT ")", p2i(cb), p2i(tams));
      assert (cb < limit, "only objects below limit here: " PTR_FORMAT " (" PTR_FORMAT ")", p2i(cb), p2i(limit));
      oop obj = oop(cb);
      assert(oopDesc::is_oop(obj), "sanity");
      assert(ctx->is_marked(obj), "object expected to be marked");
      cl->do_object(obj);
      cb += skip_bitmap_delta;
      if (cb < limit_bitmap) {
        cb = mark_bit_map->get_next_marked_addr(cb, limit_bitmap);
      }
    }
  }

  // Step 2. Accurate size-based traversal, happens past the TAMS.
  // This restarts the scan at TAMS, which makes sure we traverse all objects,
  // regardless of what happened at Step 1.
  HeapWord* cs = tams + ShenandoahForwarding::word_size();
  while (cs < limit) {
    assert (cs > tams, "only objects past TAMS here: " PTR_FORMAT " (" PTR_FORMAT ")", p2i(cs), p2i(tams));
    assert (cs < limit, "only objects below limit here: " PTR_FORMAT " (" PTR_FORMAT ")", p2i(cs), p2i(limit));
    oop obj = oop(cs);
    assert(oopDesc::is_oop(obj), "sanity");
    assert(ctx->is_marked(obj), "object expected to be marked");
    int size = obj->size();
    cl->do_object(obj);
    cs += size + skip_objsize_delta;
  }
}
462
463 template <class T>
464 class ShenandoahObjectToOopClosure : public ObjectClosure {
465 T* _cl;
466 public:
467 ShenandoahObjectToOopClosure(T* cl) : _cl(cl) {}
468
469 void do_object(oop obj) {
470 obj->oop_iterate(_cl);
471 }
472 };
473
474 template <class T>
475 class ShenandoahObjectToOopBoundedClosure : public ObjectClosure {
476 T* _cl;
477 MemRegion _bounds;
478 public:
479 ShenandoahObjectToOopBoundedClosure(T* cl, HeapWord* bottom, HeapWord* top) :
|
217 // No GCLABs in this thread, fallback to shared allocation
218 return NULL;
219 }
220 HeapWord* obj = gclab->allocate(size);
221 if (obj != NULL) {
222 return obj;
223 }
224 // Otherwise...
225 return allocate_from_gclab_slow(thread, size);
226 }
227
// Evacuate (copy) object p into to-space and return the canonical copy.
//
// The protocol is lock-free: each evacuating thread speculatively allocates
// space and copies the object, then tries to CAS-install the forwarding
// pointer. Exactly one thread wins the race; losers roll back (GCLAB) or
// overwrite (shared allocation) their speculative copy and adopt the winner's.
inline oop ShenandoahHeap::evacuate_object(oop p, Thread* thread) {
  if (ShenandoahThreadLocalData::is_oom_during_evac(Thread::current())) {
    // This thread went through the OOM during evac protocol and it is safe to return
    // the forward pointer. It must not attempt to evacuate any more.
    return ShenandoahBarrierSet::resolve_forwarded(p);
  }

  assert(ShenandoahThreadLocalData::is_evac_allowed(thread), "must be enclosed in oom-evac scope");

  size_t size = p->size();

  assert(!heap_region_containing(p)->is_humongous(), "never evacuate humongous objects");

  bool alloc_from_gclab = true;
  HeapWord* copy = NULL;

#ifdef ASSERT
  if (ShenandoahOOMDuringEvacALot &&
      (os::random() & 1) == 0) { // Simulate OOM every ~2nd slow-path call
    copy = NULL;
  } else {
#endif
    if (UseTLAB) {
      copy = allocate_from_gclab(thread, size);
    }
    if (copy == NULL) {
      // GCLAB exhausted or disabled: fall back to a shared GC allocation.
      ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared_gc(size);
      copy = allocate_memory(req);
      alloc_from_gclab = false;
    }
#ifdef ASSERT
  }
#endif

  if (copy == NULL) {
    // Total allocation failure: engage the OOM-during-evac protocol, then
    // return whatever copy (possibly the original) is currently published.
    control_thread()->handle_alloc_failure_evac(size);

    _oom_evac_handler.handle_out_of_memory_during_evacuation();

    return ShenandoahBarrierSet::resolve_forwarded(p);
  }

  // Copy the object:
  Copy::aligned_disjoint_words((HeapWord*) p, copy, size);

  // Try to install the new forwarding pointer.
  oop copy_val = oop(copy);
  oop result = ShenandoahForwarding::try_update_forwardee(p, copy_val);
  if (oopDesc::equals_raw(result, p)) {
    // Successfully evacuated. Our copy is now the public one!
    shenandoah_assert_correct(NULL, copy_val);
    return copy_val;
  } else {
    // Failed to evacuate. We need to deal with the object that is left behind. Since this
    // new allocation is certainly after TAMS, it will be considered live in the next cycle.
    // But if it happens to contain references to evacuated regions, those references would
    // not get updated for this stale copy during this cycle, and we will crash while scanning
    // it the next cycle.
    //
    // For GCLAB allocations, it is enough to rollback the allocation ptr. Either the next
    // object will overwrite this stale copy, or the filler object on LAB retirement will
    // do this. For non-GCLAB allocations, we have no way to retract the allocation, and
    // have to explicitly overwrite the copy with the filler object. With that overwrite,
    // we have to keep the fwdptr initialized and pointing to our (stale) copy.
    if (alloc_from_gclab) {
      // NOTE: after undo, the copy's memory may be reused by the next GCLAB
      // allocation, so the stale copy is only asserted in the filler branch.
      ShenandoahThreadLocalData::gclab(thread)->undo_allocation(copy, size);
    } else {
      fill_with_object(copy, size);
      shenandoah_assert_correct(NULL, copy_val);
    }
    shenandoah_assert_correct(NULL, result);
    return result;
  }
}
302
303 template<bool RESOLVE>
304 inline bool ShenandoahHeap::requires_marking(const void* entry) const {
305 oop obj = oop(entry);
306 if (RESOLVE) {
307 obj = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
308 }
309 return !_marking_context->is_marked(obj);
310 }
311
312 template <class T>
313 inline bool ShenandoahHeap::in_collection_set(T p) const {
314 HeapWord* obj = (HeapWord*) p;
315 assert(collection_set() != NULL, "Sanity");
316 assert(is_in(obj), "should be in heap");
317
348
// Returns true while a full (stop-the-world) GC cycle is in progress.
inline bool ShenandoahHeap::is_full_gc_in_progress() const {
  return _full_gc_in_progress.is_set();
}
352
// Returns true while the move (compaction) phase of a full GC is in progress.
inline bool ShenandoahHeap::is_full_gc_move_in_progress() const {
  return _full_gc_move_in_progress.is_set();
}
356
// Returns true while the update-references GC state flag is set.
inline bool ShenandoahHeap::is_update_refs_in_progress() const {
  return _gc_state.is_set(UPDATEREFS);
}
360
// Convenience overload: iterate marked objects in the whole used part of the
// region, i.e. up to the region's current top.
template<class T>
inline void ShenandoahHeap::marked_object_iterate(ShenandoahHeapRegion* region, T* cl) {
  marked_object_iterate(region, cl, region->top());
}
365
// Visit every live (marked) object in the region, in address order, up to limit.
//
// Below TAMS (top-at-mark-start) the mark bitmap is authoritative and drives
// the scan; at/above TAMS every object is implicitly live, so the walk
// proceeds by object size instead.
template<class T>
inline void ShenandoahHeap::marked_object_iterate(ShenandoahHeapRegion* region, T* cl, HeapWord* limit) {
  assert(! region->is_humongous_continuation(), "no humongous continuation regions here");

  ShenandoahMarkingContext* const ctx = complete_marking_context();
  assert(ctx->is_complete(), "sanity");

  MarkBitMap* mark_bit_map = ctx->mark_bit_map();
  HeapWord* tams = ctx->top_at_mark_start(region);

  // Bitmap bits mark object starts; the minimum step between candidates is
  // one word.
  size_t skip_bitmap_delta = 1;
  HeapWord* start = region->bottom();
  HeapWord* end = MIN2(tams, region->end());

  // Step 1. Scan below the TAMS based on bitmap data.
  HeapWord* limit_bitmap = MIN2(limit, tams);

  // Try to scan the initial candidate. If the candidate is above the TAMS, it would
  // fail the subsequent "< limit_bitmap" checks, and fall through to Step 2.
  HeapWord* cb = mark_bit_map->get_next_marked_addr(start, end);

  intx dist = ShenandoahMarkScanPrefetch;
  if (dist > 0) {
    // Batched scan that prefetches the oop data, anticipating the access to
    // either header, oop field, or forwarding pointer. Note that we cannot
    // touch anything in oop, while it still being prefetched to get enough
    // time for prefetch to work. This is why we try to scan the bitmap linearly,
    // disregarding the object size. However, since we know forwarding pointer
    // precedes the object, we can skip over it. Once we cannot trust the bitmap,
    // there is no point for prefetching the oop contents, as oop->size() will
    // touch it prematurely.

    // No variable-length arrays in standard C++, have enough slots to fit
    // the prefetch distance.
    static const int SLOT_COUNT = 256;
    guarantee(dist <= SLOT_COUNT, "adjust slot count");
    HeapWord* slots[SLOT_COUNT];

    int avail;
    do {
      avail = 0;
      // Phase A: gather up to 'dist' marked addresses, issuing prefetches.
      for (int c = 0; (c < dist) && (cb < limit_bitmap); c++) {
        Prefetch::read(cb, oopDesc::mark_offset_in_bytes());
        slots[avail++] = cb;
        cb += skip_bitmap_delta;
        if (cb < limit_bitmap) {
          cb = mark_bit_map->get_next_marked_addr(cb, limit_bitmap);
        }
      }

      // Phase B: visit the gathered objects, now (hopefully) in cache.
      for (int c = 0; c < avail; c++) {
        assert (slots[c] < tams, "only objects below TAMS here: " PTR_FORMAT " (" PTR_FORMAT ")", p2i(slots[c]), p2i(tams));
        assert (slots[c] < limit, "only objects below limit here: " PTR_FORMAT " (" PTR_FORMAT ")", p2i(slots[c]), p2i(limit));
        oop obj = oop(slots[c]);
        assert(oopDesc::is_oop(obj), "sanity");
        assert(ctx->is_marked(obj), "object expected to be marked");
        cl->do_object(obj);
      }
    } while (avail > 0);
  } else {
    // Prefetching disabled: straightforward one-at-a-time bitmap scan.
    while (cb < limit_bitmap) {
      assert (cb < tams, "only objects below TAMS here: " PTR_FORMAT " (" PTR_FORMAT ")", p2i(cb), p2i(tams));
      assert (cb < limit, "only objects below limit here: " PTR_FORMAT " (" PTR_FORMAT ")", p2i(cb), p2i(limit));
      oop obj = oop(cb);
      assert(oopDesc::is_oop(obj), "sanity");
      assert(ctx->is_marked(obj), "object expected to be marked");
      cl->do_object(obj);
      cb += skip_bitmap_delta;
      if (cb < limit_bitmap) {
        cb = mark_bit_map->get_next_marked_addr(cb, limit_bitmap);
      }
    }
  }

  // Step 2. Accurate size-based traversal, happens past the TAMS.
  // This restarts the scan at TAMS, which makes sure we traverse all objects,
  // regardless of what happened at Step 1.
  HeapWord* cs = tams;
  while (cs < limit) {
    assert (cs >= tams, "only objects past TAMS here: " PTR_FORMAT " (" PTR_FORMAT ")", p2i(cs), p2i(tams));
    assert (cs < limit, "only objects below limit here: " PTR_FORMAT " (" PTR_FORMAT ")", p2i(cs), p2i(limit));
    oop obj = oop(cs);
    assert(oopDesc::is_oop(obj), "sanity");
    assert(ctx->is_marked(obj), "object expected to be marked");
    int size = obj->size();
    cl->do_object(obj);
    cs += size;
  }
}
455
456 template <class T>
457 class ShenandoahObjectToOopClosure : public ObjectClosure {
458 T* _cl;
459 public:
460 ShenandoahObjectToOopClosure(T* cl) : _cl(cl) {}
461
462 void do_object(oop obj) {
463 obj->oop_iterate(_cl);
464 }
465 };
466
467 template <class T>
468 class ShenandoahObjectToOopBoundedClosure : public ObjectClosure {
469 T* _cl;
470 MemRegion _bounds;
471 public:
472 ShenandoahObjectToOopBoundedClosure(T* cl, HeapWord* bottom, HeapWord* top) :
|