186 return _full_collections_completed;
187 }
188
189 // Update the _full_collections_completed counter, as appropriate,
190 // at the end of a concurrent GC cycle. Note the conditional update
191 // below to allow this method to be called by a concurrent collector
192 // without synchronizing in any manner with the VM thread (which
193 // may already have initiated a STW full collection "concurrently").
194 unsigned int GenCollectedHeap::update_full_collections_completed(unsigned int count) {
195 MonitorLockerEx ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
196 assert((_full_collections_completed <= _total_full_collections) &&
197 (count <= _total_full_collections),
198 "Can't complete more collections than were started");
199 if (count > _full_collections_completed) {
200 _full_collections_completed = count;
201 ml.notify_all();
202 }
203 return _full_collections_completed;
204 }
205
206
207 #ifndef PRODUCT
208 // Override of memory state checking method in CollectedHeap:
209 // Some collectors (CMS for example) can't have badHeapWordVal written
210 // in the first two words of an object. (For instance, in the case of
211 // CMS these words hold state used to synchronize between certain
212 // (concurrent) GC steps and direct allocating mutators.)
213 // The skip_header_HeapWords() method below allows us to skip
214 // over the requisite number of HeapWords. Note that (for
215 // generational collectors) this means that those many words are
216 // skipped in each object, irrespective of the generation in which
217 // that object lives. The resultant loss of precision seems to be
218 // harmless and the pain of avoiding that imprecision appears somewhat
219 // higher than we are prepared to pay for such rudimentary debugging
220 // support.
221 void GenCollectedHeap::check_for_non_bad_heap_word_value(HeapWord* addr,
222 size_t size) {
223 if (CheckMemoryInitialization && ZapUnusedHeapArea) {
224 // We are asked to check a size in HeapWords,
225 // but the memory is mangled in juint words.
237 bool is_tlab,
238 bool first_only) {
239 HeapWord* res = NULL;
240
241 if (_young_gen->should_allocate(size, is_tlab)) {
242 res = _young_gen->allocate(size, is_tlab);
243 if (res != NULL || first_only) {
244 return res;
245 }
246 }
247
248 if (_old_gen->should_allocate(size, is_tlab)) {
249 res = _old_gen->allocate(size, is_tlab);
250 }
251
252 return res;
253 }
254
255 HeapWord* GenCollectedHeap::mem_allocate(size_t size,
256 bool* gc_overhead_limit_was_exceeded) {
257 return gen_policy()->mem_allocate_work(size,
258 false /* is_tlab */,
259 gc_overhead_limit_was_exceeded);
260 }
261
262 bool GenCollectedHeap::must_clear_all_soft_refs() {
263 return _gc_cause == GCCause::_metadata_GC_clear_soft_refs ||
264 _gc_cause == GCCause::_wb_full_gc;
265 }
266
267 void GenCollectedHeap::collect_generation(Generation* gen, bool full, size_t size,
268 bool is_tlab, bool run_verification, bool clear_soft_refs,
269 bool restore_marks_for_biased_locking) {
270 FormatBuffer<> title("Collect gen: %s", gen->short_name());
271 GCTraceTime(Trace, gc, phases) t1(title);
272 TraceCollectorStats tcs(gen->counters());
273 TraceMemoryManagerStats tmms(gen->gc_manager(), gc_cause());
274
275 gen->stat_record()->invocations++;
276 gen->stat_record()->accumulated_time.start();
277
487 BiasedLocking::restore_marks();
488 }
489 }
490
491 print_heap_after_gc();
492
493 #ifdef TRACESPINNING
494 ParallelTaskTerminator::print_termination_counts();
495 #endif
496 }
497
498 void GenCollectedHeap::register_nmethod(nmethod* nm) {
499 CodeCache::register_scavenge_root_nmethod(nm);
500 }
501
502 void GenCollectedHeap::verify_nmethod(nmethod* nm) {
503 CodeCache::verify_scavenge_root_nmethod(nm);
504 }
505
506 HeapWord* GenCollectedHeap::satisfy_failed_allocation(size_t size, bool is_tlab) {
507 return gen_policy()->satisfy_failed_allocation(size, is_tlab);
508 }
509
510 #ifdef ASSERT
511 class AssertNonScavengableClosure: public OopClosure {
512 public:
513 virtual void do_oop(oop* p) {
514 assert(!GenCollectedHeap::heap()->is_in_partial_collection(*p),
515 "Referent should not be scavengable."); }
516 virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
517 };
518 static AssertNonScavengableClosure assert_is_non_scavengable_closure;
519 #endif
520
521 void GenCollectedHeap::process_roots(StrongRootsScope* scope,
522 ScanningOption so,
523 OopClosure* strong_roots,
524 OopClosure* weak_roots,
525 CLDClosure* strong_cld_closure,
526 CLDClosure* weak_cld_closure,
527 CodeBlobToOopClosure* code_roots) {
870 }
871
872 size_t GenCollectedHeap::tlab_used(Thread* thr) const {
873 assert(!_old_gen->supports_tlab_allocation(), "Old gen supports TLAB allocation?!");
874 if (_young_gen->supports_tlab_allocation()) {
875 return _young_gen->tlab_used();
876 }
877 return 0;
878 }
879
880 size_t GenCollectedHeap::unsafe_max_tlab_alloc(Thread* thr) const {
881 assert(!_old_gen->supports_tlab_allocation(), "Old gen supports TLAB allocation?!");
882 if (_young_gen->supports_tlab_allocation()) {
883 return _young_gen->unsafe_max_tlab_alloc();
884 }
885 return 0;
886 }
887
888 HeapWord* GenCollectedHeap::allocate_new_tlab(size_t size) {
889 bool gc_overhead_limit_was_exceeded;
890 return gen_policy()->mem_allocate_work(size /* size */,
891 true /* is_tlab */,
892 &gc_overhead_limit_was_exceeded);
893 }
894
895 // Requires "*prev_ptr" to be non-NULL. Deletes and returns a block of minimal size
896 // from the list headed by "*prev_ptr".
897 static ScratchBlock *removeSmallestScratch(ScratchBlock **prev_ptr) {
898 bool first = true;
899 size_t min_size = 0; // "first" makes this conceptually infinite.
900 ScratchBlock **smallest_ptr, *smallest;
901 ScratchBlock *cur = *prev_ptr;
902 while (cur) {
903 assert(*prev_ptr == cur, "just checking");
904 if (first || cur->num_words < min_size) {
905 smallest_ptr = prev_ptr;
906 smallest = cur;
907 min_size = smallest->num_words;
908 first = false;
909 }
910 prev_ptr = &cur->next;
186 return _full_collections_completed;
187 }
188
189 // Update the _full_collections_completed counter, as appropriate,
190 // at the end of a concurrent GC cycle. Note the conditional update
191 // below to allow this method to be called by a concurrent collector
192 // without synchronizing in any manner with the VM thread (which
193 // may already have initiated a STW full collection "concurrently").
194 unsigned int GenCollectedHeap::update_full_collections_completed(unsigned int count) {
195 MonitorLockerEx ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
196 assert((_full_collections_completed <= _total_full_collections) &&
197 (count <= _total_full_collections),
198 "Can't complete more collections than were started");
199 if (count > _full_collections_completed) {
200 _full_collections_completed = count;
201 ml.notify_all();
202 }
203 return _full_collections_completed;
204 }
205
206 // Return true if any of the following is true:
207 // . the allocation won't fit into the current young gen heap
208 // . gc locker is occupied (jni critical section)
209 // . heap memory is tight -- the most recent previous collection
210 // was a full collection because a partial collection (would
211 // have) failed and is likely to fail again
212 bool GenCollectedHeap::should_try_older_generation_allocation(size_t word_size) const {
213 size_t young_capacity = young_gen()->capacity_before_gc();
214 return (word_size > heap_word_size(young_capacity))
215 || GCLocker::is_active_and_needs_gc()
216 || incremental_collection_failed();
217 }
218
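    // Expansion fallback: try to expand and allocate from the old generation
    // first, then from the young generation; returns NULL if neither
    // generation can satisfy the request even after expansion.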
219 HeapWord* GenCollectedHeap::expand_heap_and_allocate(size_t size, bool is_tlab) {
220 HeapWord* result = NULL;
221 if (old_gen()->should_allocate(size, is_tlab)) {
222 result = old_gen()->expand_and_allocate(size, is_tlab);
223 }
224 if (result == NULL) {
225 if (young_gen()->should_allocate(size, is_tlab)) {
226 result = young_gen()->expand_and_allocate(size, is_tlab);
227 }
228 }
229 assert(result == NULL || is_in_reserved(result), "result not in heap");
230 return result;
231 }
232
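    // Slow-path allocation used by mem_allocate() and TLAB refills: loop
    // attempting (1) a lock-free young-gen allocation, (2) a locked
    // allocation that may also try the old generation, and (3) a collection
    // performed by the VM thread, stalling on the GC locker as needed, until
    // the request is satisfied or deemed unsatisfiable.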
233 HeapWord* GenCollectedHeap::mem_allocate_work(size_t size,
234 bool is_tlab,
235 bool* gc_overhead_limit_was_exceeded) {
236 debug_only(check_for_valid_allocation_state());
237 assert(no_gc_in_progress(), "Allocation during gc not allowed");
238
239 // In general gc_overhead_limit_was_exceeded should be false, so
240 // set it to false here and reset it to true only if the gc time
241 // limit is exceeded, as checked below.
242 *gc_overhead_limit_was_exceeded = false;
243
244 HeapWord* result = NULL;
245
246 // Loop until the allocation is satisfied, or unsatisfied after GC.
247 for (uint try_count = 1, gclocker_stalled_count = 0; /* return or throw */; try_count += 1) {
248 HandleMark hm; // Discard any handles allocated in each iteration.
249
250 // First allocation attempt is lock-free.
251 Generation *young = young_gen();
252 assert(young->supports_inline_contig_alloc(),
253 "Otherwise, must do alloc within heap lock");
254 if (young->should_allocate(size, is_tlab)) {
255 result = young->par_allocate(size, is_tlab);
256 if (result != NULL) {
257 assert(is_in_reserved(result), "result not in heap");
258 return result;
259 }
260 }
261 uint gc_count_before; // Read inside the Heap_lock locked region.
262 {
263 MutexLocker ml(Heap_lock);
264 log_trace(gc, alloc)("GenCollectedHeap::mem_allocate_work: attempting locked slow path allocation");
265 // Note that only large objects get a shot at being
266 // allocated in later generations.
267 bool first_only = ! should_try_older_generation_allocation(size);
268
269 result = attempt_allocation(size, is_tlab, first_only);
270 if (result != NULL) {
271 assert(is_in_reserved(result), "result not in heap");
272 return result;
273 }
274
275 if (GCLocker::is_active_and_needs_gc()) {
276 if (is_tlab) {
277 return NULL; // Caller will retry allocating individual object.
278 }
279 if (!is_maximal_no_gc()) {
280 // Try and expand heap to satisfy request.
281 result = expand_heap_and_allocate(size, is_tlab);
282 // Result could be null if we are out of space.
283 if (result != NULL) {
284 return result;
285 }
286 }
287
288 if (gclocker_stalled_count > GCLockerRetryAllocationCount) {
289 return NULL; // We didn't get to do a GC and we didn't get any memory.
290 }
291
292 // If this thread is not in a jni critical section, we stall
293 // the requestor until the critical section has cleared and
294 // GC allowed. When the critical section clears, a GC is
295 // initiated by the last thread exiting the critical section; so
296 // we retry the allocation sequence from the beginning of the loop,
297 // rather than causing more, now probably unnecessary, GC attempts.
298 JavaThread* jthr = JavaThread::current();
299 if (!jthr->in_critical()) {
300 MutexUnlocker mul(Heap_lock);
301 // Wait for JNI critical section to be exited
302 GCLocker::stall_until_clear();
303 gclocker_stalled_count += 1;
304 continue;
305 } else {
306 if (CheckJNICalls) {
307 fatal("Possible deadlock due to allocating while"
308 " in jni critical section");
309 }
310 return NULL;
311 }
312 }
313
314 // Read the gc count while the heap lock is held.
315 gc_count_before = total_collections();
316 }
317
318 VM_GenCollectForAllocation op(size, is_tlab, gc_count_before);
319 VMThread::execute(&op);
320 if (op.prologue_succeeded()) {
321 result = op.result();
322 if (op.gc_locked()) {
323 assert(result == NULL, "must be NULL if gc_locked() is true");
324 continue; // Retry and/or stall as necessary.
325 }
326
327 // Allocation has failed and a collection
328 // has been done. If the gc time limit was exceeded
329 // this time, return NULL so that an out-of-memory error
330 // will be thrown. Clear gc_overhead_limit_exceeded
331 // so that the exceeded-overhead state does not persist.
332
333 const bool limit_exceeded = gen_policy()->size_policy()->gc_overhead_limit_exceeded();
334 const bool softrefs_clear = gen_policy()->all_soft_refs_clear();
335
336 if (limit_exceeded && softrefs_clear) {
337 *gc_overhead_limit_was_exceeded = true;
338 gen_policy()->size_policy()->set_gc_overhead_limit_exceeded(false);
339 if (op.result() != NULL) {
340 CollectedHeap::fill_with_object(op.result(), size);
341 }
342 return NULL;
343 }
344 assert(result == NULL || is_in_reserved(result),
345 "result not in heap");
346 return result;
347 }
348
349 // Give a warning if we seem to be looping forever.
350 if ((QueuedAllocationWarningCount > 0) &&
351 (try_count % QueuedAllocationWarningCount == 0)) {
352 log_warning(gc, ergo)("GenCollectedHeap::mem_allocate_work retries %d times,"
353 " size=" SIZE_FORMAT " %s", try_count, size, is_tlab ? "(TLAB)" : "");
354 }
355 }
356 }
357
358 #ifndef PRODUCT
359 // Override of memory state checking method in CollectedHeap:
360 // Some collectors (CMS for example) can't have badHeapWordVal written
361 // in the first two words of an object. (For instance, in the case of
362 // CMS these words hold state used to synchronize between certain
363 // (concurrent) GC steps and direct allocating mutators.)
364 // The skip_header_HeapWords() method below allows us to skip
365 // over the requisite number of HeapWords. Note that (for
366 // generational collectors) this means that those many words are
367 // skipped in each object, irrespective of the generation in which
368 // that object lives. The resultant loss of precision seems to be
369 // harmless and the pain of avoiding that imprecision appears somewhat
370 // higher than we are prepared to pay for such rudimentary debugging
371 // support.
372 void GenCollectedHeap::check_for_non_bad_heap_word_value(HeapWord* addr,
373 size_t size) {
374 if (CheckMemoryInitialization && ZapUnusedHeapArea) {
375 // We are asked to check a size in HeapWords,
376 // but the memory is mangled in juint words.
388 bool is_tlab,
389 bool first_only) {
390 HeapWord* res = NULL;
391
392 if (_young_gen->should_allocate(size, is_tlab)) {
393 res = _young_gen->allocate(size, is_tlab);
394 if (res != NULL || first_only) {
395 return res;
396 }
397 }
398
399 if (_old_gen->should_allocate(size, is_tlab)) {
400 res = _old_gen->allocate(size, is_tlab);
401 }
402
403 return res;
404 }
405
406 HeapWord* GenCollectedHeap::mem_allocate(size_t size,
407 bool* gc_overhead_limit_was_exceeded) {
408 return mem_allocate_work(size,
409 false /* is_tlab */,
410 gc_overhead_limit_was_exceeded);
411 }
412
413 bool GenCollectedHeap::must_clear_all_soft_refs() {
414 return _gc_cause == GCCause::_metadata_GC_clear_soft_refs ||
415 _gc_cause == GCCause::_wb_full_gc;
416 }
417
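    // Collect a single generation, wrapping the work with the shared
    // tracing, counter, and timing bookkeeping.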
418 void GenCollectedHeap::collect_generation(Generation* gen, bool full, size_t size,
419 bool is_tlab, bool run_verification, bool clear_soft_refs,
420 bool restore_marks_for_biased_locking) {
421 FormatBuffer<> title("Collect gen: %s", gen->short_name());
422 GCTraceTime(Trace, gc, phases) t1(title);
423 TraceCollectorStats tcs(gen->counters());
424 TraceMemoryManagerStats tmms(gen->gc_manager(), gc_cause());
425
426 gen->stat_record()->invocations++;
427 gen->stat_record()->accumulated_time.start();
428
638 BiasedLocking::restore_marks();
639 }
640 }
641
642 print_heap_after_gc();
643
644 #ifdef TRACESPINNING
645 ParallelTaskTerminator::print_termination_counts();
646 #endif
647 }
648
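    // nmethods whose oops may point into the young generation are tracked
    // on the code cache's scavenge-root list so that young collections can
    // visit them.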
649 void GenCollectedHeap::register_nmethod(nmethod* nm) {
650 CodeCache::register_scavenge_root_nmethod(nm);
651 }
652
653 void GenCollectedHeap::verify_nmethod(nmethod* nm) {
654 CodeCache::verify_scavenge_root_nmethod(nm);
655 }
656
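    // Allocation fallback invoked after a failed allocation has triggered a
    // collection request: expand the heap if the GC locker prevents a
    // collection, otherwise escalate from an incremental collection to a
    // full collection, and finally to a full collection that clears soft
    // references and fully compacts the heap, retrying the allocation
    // between steps.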
657 HeapWord* GenCollectedHeap::satisfy_failed_allocation(size_t size, bool is_tlab) {
658 GCCauseSetter x(this, GCCause::_allocation_failure);
659 HeapWord* result = NULL;
660
661 assert(size != 0, "Precondition violated");
662 if (GCLocker::is_active_and_needs_gc()) {
663 // GC locker is active; instead of a collection we will attempt
664 // to expand the heap, if there's room for expansion.
665 if (!is_maximal_no_gc()) {
666 result = expand_heap_and_allocate(size, is_tlab);
667 }
668 return result; // Could be null if we are out of space.
669 } else if (!incremental_collection_will_fail(false /* don't consult_young */)) {
670 // Do an incremental collection.
671 do_collection(false, // full
672 false, // clear_all_soft_refs
673 size, // size
674 is_tlab, // is_tlab
675 GenCollectedHeap::OldGen); // max_generation
676 } else {
677 log_trace(gc)(" :: Trying full because partial may fail :: ");
678 // Try a full collection; see delta for bug id 6266275
679 // for the original code and why this has been simplified
680 // with from-space allocation criteria modified and
681 // such allocation moved out of the safepoint path.
682 do_collection(true, // full
683 false, // clear_all_soft_refs
684 size, // size
685 is_tlab, // is_tlab
686 GenCollectedHeap::OldGen); // max_generation
687 }
688
689 result = attempt_allocation(size, is_tlab, false /*first_only*/);
690
691 if (result != NULL) {
692 assert(is_in_reserved(result), "result not in heap");
693 return result;
694 }
695
696 // OK, collection failed, try expansion.
697 result = expand_heap_and_allocate(size, is_tlab);
698 if (result != NULL) {
699 return result;
700 }
701
702 // If we reach this point, we're really out of memory. Try every trick
703 // we can to reclaim memory. Force collection of soft references. Force
704 // a complete compaction of the heap. Any additional methods for finding
705 // free memory should be here, especially if they are expensive. If this
706 // attempt fails, an OOM exception will be thrown.
707 {
708 UIntFlagSetting flag_change(MarkSweepAlwaysCompactCount, 1); // Make sure the heap is fully compacted
709
710 do_collection(true, // full
711 true, // clear_all_soft_refs
712 size, // size
713 is_tlab, // is_tlab
714 GenCollectedHeap::OldGen); // max_generation
715 }
716
717 result = attempt_allocation(size, is_tlab, false /* first_only */);
718 if (result != NULL) {
719 assert(is_in_reserved(result), "result not in heap");
720 return result;
721 }
722
723 assert(!gen_policy()->should_clear_all_soft_refs(),
724 "Flag should have been handled and cleared prior to this point");
725
726 // What else? We might try synchronous finalization later. If the total
727 // space available is large enough for the allocation, then a more
728 // complete compaction phase than we've tried so far might be
729 // appropriate.
730 return NULL;
731 }
732
733 #ifdef ASSERT
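    // Debug-only closure asserting that an oop it visits does not point
    // into the part of the heap that a partial (young) collection would
    // scavenge.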
734 class AssertNonScavengableClosure: public OopClosure {
735 public:
736 virtual void do_oop(oop* p) {
737 assert(!GenCollectedHeap::heap()->is_in_partial_collection(*p),
738 "Referent should not be scavengable."); }
739 virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
740 };
741 static AssertNonScavengableClosure assert_is_non_scavengable_closure;
742 #endif
743
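    // Apply the given closures to the strong and weak root sets, including
    // class loader data and compiled-code roots; the ScanningOption controls
    // which optional root groups are visited.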
744 void GenCollectedHeap::process_roots(StrongRootsScope* scope,
745 ScanningOption so,
746 OopClosure* strong_roots,
747 OopClosure* weak_roots,
748 CLDClosure* strong_cld_closure,
749 CLDClosure* weak_cld_closure,
750 CodeBlobToOopClosure* code_roots) {
1093 }
1094
1095 size_t GenCollectedHeap::tlab_used(Thread* thr) const {
1096 assert(!_old_gen->supports_tlab_allocation(), "Old gen supports TLAB allocation?!");
1097 if (_young_gen->supports_tlab_allocation()) {
1098 return _young_gen->tlab_used();
1099 }
1100 return 0;
1101 }
1102
1103 size_t GenCollectedHeap::unsafe_max_tlab_alloc(Thread* thr) const {
1104 assert(!_old_gen->supports_tlab_allocation(), "Old gen supports TLAB allocation?!");
1105 if (_young_gen->supports_tlab_allocation()) {
1106 return _young_gen->unsafe_max_tlab_alloc();
1107 }
1108 return 0;
1109 }
1110
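     // TLAB refills take the same slow path as ordinary object allocation;
     // the overhead-limit flag is consumed locally rather than propagated to
     // the caller.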
1111 HeapWord* GenCollectedHeap::allocate_new_tlab(size_t size) {
1112 bool gc_overhead_limit_was_exceeded;
1113 return mem_allocate_work(size /* size */,
1114 true /* is_tlab */,
1115 &gc_overhead_limit_was_exceeded);
1116 }
1117
1118 // Requires "*prev_ptr" to be non-NULL. Deletes and returns a block of minimal size
1119 // from the list headed by "*prev_ptr".
1120 static ScratchBlock *removeSmallestScratch(ScratchBlock **prev_ptr) {
1121 bool first = true;
1122 size_t min_size = 0; // "first" makes this conceptually infinite.
1123 ScratchBlock **smallest_ptr, *smallest;
1124 ScratchBlock *cur = *prev_ptr;
1125 while (cur) {
1126 assert(*prev_ptr == cur, "just checking");
1127 if (first || cur->num_words < min_size) {
1128 smallest_ptr = prev_ptr;
1129 smallest = cur;
1130 min_size = smallest->num_words;
1131 first = false;
1132 }
1133 prev_ptr = &cur->next;